本文整理汇总了Java中com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions.setStagingLocation方法的典型用法代码示例。如果您正苦于以下问题:Java DataflowPipelineOptions.setStagingLocation方法的具体用法?Java DataflowPipelineOptions.setStagingLocation怎么用?Java DataflowPipelineOptions.setStagingLocation使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions
的用法示例。
在下文中一共展示了DataflowPipelineOptions.setStagingLocation方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: pipelineOptions
import com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions; //导入方法依赖的package包/类
/**
* Create Dataflow Pipeline options from the standard command-line options, "--project=",
* "--runner=" and "--stagingLocation="
*
* @param args
* @return
* @throws IOException
*/
/**
 * Builds {@link DataflowPipelineOptions} from the standard command-line
 * arguments: "--project=", "--runner=" and "--stagingLocation=".
 *
 * @param args raw command-line arguments
 * @return configured Dataflow pipeline options
 * @throws IOException if argument parsing fails
 */
public static DataflowPipelineOptions pipelineOptions(String[] args) throws IOException {
  LOG.info("Set up Dataflow options");
  DataflowPipelineOptions options = PipelineOptionsFactory.as(DataflowPipelineOptions.class);
  Map<String, String> parsed = StringUtils.parseArgs(args);
  options.setProject(parsed.get(PROJECT));
  // Staging location precedence: STAGING, then STAGING_LOCATION, then derived from WORKSPACE.
  if (parsed.containsKey(STAGING)) {
    options.setStagingLocation(parsed.get(STAGING));
  } else if (parsed.containsKey(STAGING_LOCATION)) {
    options.setStagingLocation(parsed.get(STAGING_LOCATION));
  } else if (parsed.containsKey(WORKSPACE)) {
    options.setStagingLocation(parsed.get(WORKSPACE) + "/staging");
  }
  options.setRunner(runner(parsed.get(RUNNER)));
  String maxWorkers = parsed.get(MAX_WORKERS);
  options.setMaxNumWorkers(maxWorkers == null ? 1 : Integer.parseInt(maxWorkers));
  // Machine type: explicit flag wins, otherwise fall back to the project default.
  options.setWorkerMachineType(
      parsed.containsKey(MACHINE_TYPE) ? parsed.get(MACHINE_TYPE) : DEFAULT_MACHINE_TYPE);
  return options;
}
示例2: getCloudExecutionOptions
import com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions; //导入方法依赖的package包/类
/**
 * Assembles pipeline options for execution on the Dataflow service,
 * blocking until the submitted job finishes.
 *
 * @param stagingLocation GCS path where pipeline binaries are staged
 * @return cloud execution options
 */
private PipelineOptions getCloudExecutionOptions(String stagingLocation) {
  final DataflowPipelineOptions cloudOptions =
      PipelineOptionsFactory.as(DataflowPipelineOptions.class);
  cloudOptions.setRunner(BlockingDataflowPipelineRunner.class);
  cloudOptions.setProject(Constants.PROJECT_ID);
  cloudOptions.setStagingLocation(stagingLocation);
  return cloudOptions;
}
示例3: getCloudExecutionOptions
import com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions; //导入方法依赖的package包/类
/**
 * Assembles pipeline options for execution on the Dataflow service,
 * using the App Engine application id as the GCP project and blocking
 * until the submitted job finishes.
 *
 * @param stagingLocation GCS path where pipeline binaries are staged
 * @return cloud execution options
 */
private PipelineOptions getCloudExecutionOptions(String stagingLocation) {
  final DataflowPipelineOptions cloudOptions =
      PipelineOptionsFactory.as(DataflowPipelineOptions.class);
  cloudOptions.setRunner(BlockingDataflowPipelineRunner.class);
  cloudOptions.setProject(SystemProperty.applicationId.get());
  cloudOptions.setStagingLocation(stagingLocation);
  return cloudOptions;
}
示例4: getCloudExecutionOptions
import com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions; //导入方法依赖的package包/类
/**
 * Assembles pipeline options for execution on the Dataflow service,
 * using the App Engine application id as the GCP project and blocking
 * until the submitted job finishes.
 *
 * @param stagingLocation GCS path where pipeline binaries are staged
 * @return cloud execution options
 */
private static PipelineOptions getCloudExecutionOptions(String stagingLocation) {
  final DataflowPipelineOptions cloudOptions =
      PipelineOptionsFactory.as(DataflowPipelineOptions.class);
  cloudOptions.setRunner(BlockingDataflowPipelineRunner.class);
  cloudOptions.setProject(SystemProperty.applicationId.get());
  cloudOptions.setStagingLocation(stagingLocation);
  return cloudOptions;
}
示例5: run
import com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions; //导入方法依赖的package包/类
public static void run() {
DataflowPipelineOptions options = PipelineOptionsFactory.create()
.as(DataflowPipelineOptions.class);
options.setRunner(BlockingDataflowPipelineRunner.class);
options.setProject("chrome-oven-144308");
options.setFilesToStage(
detectClassPathResourcesToStage(
DataflowPipelineRunner.class.getClassLoader()
)
);
options.setStagingLocation("gs://dataflow-chrome-oven-144308/stagingForScheduledPipeline");
Pipeline p = Pipeline.create(options);
System.out.println("get here 0");
p.apply(TextIO.Read.from("gs://dataflow-samples/shakespeare/*"))
.apply(ParDo.named("ExtractWords").of(new DoFn<String, String>() {
@Override
public void processElement(ProcessContext c) {
System.out.println("get here 1");
for (String word : c.element().split("[^a-zA-Z']+")) {
if (!word.isEmpty()) {
c.output(word);
}
}
}
}))
.apply(Count.<String>perElement())
.apply("FormatResults", MapElements.via(new SimpleFunction<KV<String, Long>, String>() {
@Override
public String apply(KV<String, Long> input) {
System.out.println("get here 3");
return input.getKey() + ": " + input.getValue();
}
}))
.apply(TextIO.Write.to("gs://dataflow-chrome-oven-144308/scheduled"));
p.run();
}
示例6: main
import com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions; //导入方法依赖的package包/类
public static void main(String[] args) throws GeneralSecurityException, IOException, ParseException, ParserConfigurationException, SAXException {
String params = null;
for (int i = 0; i < args.length; i++) {
if (args[i].startsWith("--params="))
params = args[i].replaceFirst("--params=", "");
}
System.out.println(params);
init(params);
GoogleCredential credential = new GoogleCredential.Builder()
.setTransport(new NetHttpTransport())
.setJsonFactory(new JacksonFactory())
.setServiceAccountId(accountEmail)
.setServiceAccountScopes(Arrays.asList(new String[] {"https://www.googleapis.com/auth/cloud-platform"}))
.setServiceAccountPrivateKeyFromP12File(new File(keyFile))
.build();
DataflowPipelineOptions options = PipelineOptionsFactory.create().as(DataflowPipelineOptions.class);
options.setRunner(DataflowPipelineRunner.class);
// Your project ID is required in order to run your pipeline on the Google Cloud.
options.setProject(projectId);
// Your Google Cloud Storage path is required for staging local files.
options.setStagingLocation(workingBucket);
options.setGcpCredential(credential);
options.setServiceAccountName(accountEmail);
options.setServiceAccountKeyfile(keyFile);
options.setMaxNumWorkers(maxNumWorkers);
options.setDiskSizeGb(diskSizeGb);
options.setWorkerMachineType(machineType);
options.setAutoscalingAlgorithm(AutoscalingAlgorithmType.THROUGHPUT_BASED);
options.setZone(zone);
options.setStreaming(isStreaming);
options.setJobName(pipelineName);
Gson gson = new Gson();
TableSchema schema = gson.fromJson(schemaStr, TableSchema.class);
Pipeline pipeline = Pipeline.create(options);
PCollection<String> streamData =
pipeline.apply(PubsubIO.Read.named("ReadFromPubsub")
.topic(String.format("projects/%1$s/topics/%2$s",projectId,pubSubTopic)));
PCollection<TableRow> tableRow = streamData.apply("ToTableRow", ParDo.of(new PrepData.ToTableRow()));
tableRow.apply(BigQueryIO.Write
.named("WriteBQTable")
.to(String.format("%1$s:%2$s.%3$s",projectId, bqDataSet, bqTable))
.withSchema(schema)
.withWriteDisposition(BigQueryIO.Write.WriteDisposition.WRITE_APPEND));
System.out.println("Starting pipeline " + pipelineName);
pipeline.run();
}