

Java Datasets.delete Method Code Examples

This article collects typical usage examples of the Datasets.delete method from the Java class org.kitesdk.data.Datasets. If you are wondering what Datasets.delete does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples of org.kitesdk.data.Datasets, the class this method belongs to.


The sections below present 15 code examples of the Datasets.delete method, ordered by popularity by default.
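Before the project examples, here is a minimal sketch of the basic pattern they share: Datasets.delete takes a dataset URI string and returns a boolean indicating whether a dataset was actually removed, and it is commonly guarded with Datasets.exists (as in Examples 1, 4, 6, 7, 8, and 9 below). The "dataset:file:/tmp/data/users" URI here is a hypothetical placeholder for illustration, not one taken from the projects that follow.

import org.kitesdk.data.Datasets;

public class DeleteDatasetSketch {
  public static void main(String[] args) {
    // Hypothetical dataset URI, used purely for illustration
    String uri = "dataset:file:/tmp/data/users";

    // Guarding with Datasets.exists makes the cleanup a no-op when the
    // dataset is missing; Datasets.delete returns true only if it
    // actually removed a dataset at this URI.
    if (Datasets.exists(uri)) {
      boolean deleted = Datasets.delete(uri);
      System.out.println("Deleted: " + deleted);
    }
  }
}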

Example 1: testFileStoreWithSavePolicy

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Test
public void testFileStoreWithSavePolicy() throws EventDeliveryException {
  if (Datasets.exists(ERROR_DATASET_URI)) {
    Datasets.delete(ERROR_DATASET_URI);
  }
  config.put(DatasetSinkConstants.CONFIG_FAILURE_POLICY,
      DatasetSinkConstants.SAVE_FAILURE_POLICY);
  config.put(DatasetSinkConstants.CONFIG_KITE_ERROR_DATASET_URI,
      ERROR_DATASET_URI);
  DatasetSink sink = sink(in, config);

  // run the sink
  sink.start();
  sink.process();
  sink.stop();

  Assert.assertEquals(
      Sets.newHashSet(expected),
      read(Datasets.load(FILE_DATASET_URI)));
  Assert.assertEquals("Should have committed", 0, remaining(in));
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 22, Source: TestDatasetSink.java

Example 2: setup

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Before
public void setup() throws EventDeliveryException {
  Datasets.delete(FILE_DATASET_URI);
  Datasets.create(FILE_DATASET_URI, DESCRIPTOR);

  this.config = new Context();
  config.put("keep-alive", "0");
  this.in = new MemoryChannel();
  Configurables.configure(in, config);

  config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_URI, FILE_DATASET_URI);

  GenericRecordBuilder builder = new GenericRecordBuilder(RECORD_SCHEMA);
  expected = Lists.<GenericRecord>newArrayList(
      builder.set("id", "1").set("msg", "msg1").build(),
      builder.set("id", "2").set("msg", "msg2").build(),
      builder.set("id", "3").set("msg", "msg3").build());

  putToChannel(in, Iterables.transform(expected,
      new Function<GenericRecord, Event>() {
        private int i = 0;

        @Override
        public Event apply(@Nullable GenericRecord rec) {
          this.i += 1;
          boolean useURI = (i % 2) == 0;
          return event(rec, RECORD_SCHEMA, SCHEMA_FILE, useURI);
        }
      }));
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 31, Source: TestDatasetSink.java

Example 3: testParquetDataset

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Test
public void testParquetDataset() throws EventDeliveryException {
  Datasets.delete(FILE_DATASET_URI);
  Dataset<GenericRecord> created = Datasets.create(FILE_DATASET_URI,
      new DatasetDescriptor.Builder(DESCRIPTOR)
          .format("parquet")
          .build());

  DatasetSink sink = sink(in, config);

  // run the sink
  sink.start();
  sink.process();

  // the transaction should not commit during the call to process
  assertThrows("Transaction should still be open", IllegalStateException.class,
      new Callable() {
        @Override
        public Object call() throws EventDeliveryException {
          in.getTransaction().begin();
          return null;
        }
      });
  // The records won't commit until the call to stop()
  Assert.assertEquals("Should not have committed", 0, read(created).size());

  sink.stop();

  Assert.assertEquals(Sets.newHashSet(expected), read(created));
  Assert.assertEquals("Should have committed", 0, remaining(in));
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 32, Source: TestDatasetSink.java

Example 4: testPartitionedData

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Test
public void testPartitionedData() throws EventDeliveryException {
  URI partitionedUri = URI.create("dataset:file:target/test_repo/partitioned");
  try {
    Datasets.create(partitionedUri, new DatasetDescriptor.Builder(DESCRIPTOR)
        .partitionStrategy(new PartitionStrategy.Builder()
            .identity("id", 10) // partition by id
            .build())
        .build());

    config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_URI,
        partitionedUri.toString());
    DatasetSink sink = sink(in, config);

    // run the sink
    sink.start();
    sink.process();
    sink.stop();

    Assert.assertEquals(
        Sets.newHashSet(expected),
        read(Datasets.load(partitionedUri)));
    Assert.assertEquals("Should have committed", 0, remaining(in));
  } finally {
    if (Datasets.exists(partitionedUri)) {
      Datasets.delete(partitionedUri);
    }
  }
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 30, Source: TestDatasetSink.java

Example 5: testStartBeforeDatasetCreated

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Test
public void testStartBeforeDatasetCreated() throws EventDeliveryException {
  // delete the dataset created by setup
  Datasets.delete(FILE_DATASET_URI);

  DatasetSink sink = sink(in, config);

  // start the sink
  sink.start();

  // run the sink without a target dataset
  try {
    sink.process();
    Assert.fail("Should have thrown an exception: no such dataset");
  } catch (EventDeliveryException e) {
    // expected
  }

  // create the target dataset
  Datasets.create(FILE_DATASET_URI, DESCRIPTOR);

  // run the sink
  sink.process();
  sink.stop();

  Assert.assertEquals(Sets.newHashSet(expected), read(Datasets.load(FILE_DATASET_URI)));
  Assert.assertEquals("Should have committed", 0, remaining(in));
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 29, Source: TestDatasetSink.java

Example 6: testMiniClusterStore

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Test
public void testMiniClusterStore() throws EventDeliveryException, IOException {
  // setup a minicluster
  MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new Configuration())
      .build();

  FileSystem dfs = cluster.getFileSystem();
  Configuration conf = dfs.getConf();

  URI hdfsUri = URI.create(
      "dataset:" + conf.get("fs.defaultFS") + "/tmp/repo" + DATASET_NAME);
  try {
    // create a repository and dataset in HDFS
    Datasets.create(hdfsUri, DESCRIPTOR);

    // update the config to use the HDFS repository
    config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_URI, hdfsUri.toString());

    DatasetSink sink = sink(in, config);

    // run the sink
    sink.start();
    sink.process();
    sink.stop();

    Assert.assertEquals(
        Sets.newHashSet(expected),
        read(Datasets.load(hdfsUri)));
    Assert.assertEquals("Should have committed", 0, remaining(in));

  } finally {
    if (Datasets.exists(hdfsUri)) {
      Datasets.delete(hdfsUri);
    }
    cluster.shutdown();
  }
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 39, Source: TestDatasetSink.java

Example 7: testMissingSchemaWithSavePolicy

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Test
public void testMissingSchemaWithSavePolicy() throws EventDeliveryException {
  if (Datasets.exists(ERROR_DATASET_URI)) {
    Datasets.delete(ERROR_DATASET_URI);
  }
  config.put(DatasetSinkConstants.CONFIG_FAILURE_POLICY,
      DatasetSinkConstants.SAVE_FAILURE_POLICY);
  config.put(DatasetSinkConstants.CONFIG_KITE_ERROR_DATASET_URI,
      ERROR_DATASET_URI);
  final DatasetSink sink = sink(in, config);

  Event badEvent = new SimpleEvent();
  badEvent.setHeaders(Maps.<String, String>newHashMap());
  badEvent.setBody(serialize(expected.get(0), RECORD_SCHEMA));
  putToChannel(in, badEvent);

  // run the sink
  sink.start();
  sink.process();
  sink.stop();

  Assert.assertEquals("Good records should have been written",
      Sets.newHashSet(expected),
      read(Datasets.load(FILE_DATASET_URI)));
  Assert.assertEquals("Should not have rolled back", 0, remaining(in));
  Assert.assertEquals("Should have saved the bad event",
      Sets.newHashSet(AvroFlumeEvent.newBuilder()
        .setBody(ByteBuffer.wrap(badEvent.getBody()))
        .setHeaders(toUtf8Map(badEvent.getHeaders()))
        .build()),
      read(Datasets.load(ERROR_DATASET_URI, AvroFlumeEvent.class)));
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 33, Source: TestDatasetSink.java

Example 8: testSerializedWithIncompatibleSchemasWithSavePolicy

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Test
public void testSerializedWithIncompatibleSchemasWithSavePolicy()
    throws EventDeliveryException {
  if (Datasets.exists(ERROR_DATASET_URI)) {
    Datasets.delete(ERROR_DATASET_URI);
  }
  config.put(DatasetSinkConstants.CONFIG_FAILURE_POLICY,
      DatasetSinkConstants.SAVE_FAILURE_POLICY);
  config.put(DatasetSinkConstants.CONFIG_KITE_ERROR_DATASET_URI,
      ERROR_DATASET_URI);
  final DatasetSink sink = sink(in, config);

  GenericRecordBuilder builder = new GenericRecordBuilder(
      INCOMPATIBLE_SCHEMA);
  GenericData.Record rec = builder.set("username", "koala").build();

  // We pass in a valid schema in the header, but an incompatible schema
  // was used to serialize the record
  Event badEvent = event(rec, INCOMPATIBLE_SCHEMA, SCHEMA_FILE, true);
  putToChannel(in, badEvent);

  // run the sink
  sink.start();
  sink.process();
  sink.stop();

  Assert.assertEquals("Good records should have been written",
      Sets.newHashSet(expected),
      read(Datasets.load(FILE_DATASET_URI)));
  Assert.assertEquals("Should not have rolled back", 0, remaining(in));
  Assert.assertEquals("Should have saved the bad event",
      Sets.newHashSet(AvroFlumeEvent.newBuilder()
        .setBody(ByteBuffer.wrap(badEvent.getBody()))
        .setHeaders(toUtf8Map(badEvent.getHeaders()))
        .build()),
      read(Datasets.load(ERROR_DATASET_URI, AvroFlumeEvent.class)));
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 38, Source: TestDatasetSink.java

Example 9: tearDown

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Override
public void tearDown() {
  super.tearDown();
  String uri = "dataset:file:" + getTablePath();
  if (Datasets.exists(uri)) {
    Datasets.delete(uri);
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 9, Source: TestParquetImport.java

Example 10: mergeDataset

import org.kitesdk.data.Datasets; // import the package/class this method depends on
/**
 * Merges a dataset into this.
 */
public void mergeDataset(String uri) {
  FileSystemDataset<GenericRecord> update = Datasets.load(uri);
  if (dataset instanceof FileSystemDataset) {
    ((FileSystemDataset<GenericRecord>) dataset).merge(update);
    // And let's completely drop the temporary dataset
    Datasets.delete(uri);
  } else {
    throw new SqoopException(
        KiteConnectorError.GENERIC_KITE_CONNECTOR_0000, uri);
  }
}
 
Developer: vybs, Project: sqoop-on-spark, Lines: 15, Source: KiteDatasetExecutor.java

Example 11: run

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  // Delete the users dataset
  boolean success = Datasets.delete("dataset:hdfs:/tmp/data/users");

  return success ? 0 : 1;
}
 
Developer: kite-sdk, Project: kite-examples, Lines: 8, Source: DeleteUserDataset.java

Example 12: run

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  // Delete the users dataset
  boolean success = Datasets.delete("dataset:hive?dataset=users");

  return success ? 0 : 1;
}
 
Developer: kite-sdk, Project: kite-examples, Lines: 8, Source: DeleteHiveUserDataset.java

Example 13: run

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  // Delete the products dataset
  boolean success = Datasets.delete("dataset:hdfs:/tmp/data/products");

  return success ? 0 : 1;
}
 
Developer: kite-sdk, Project: kite-examples, Lines: 8, Source: DeleteProductDataset.java

Example 14: run

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@Override
public int run(String[] args) throws Exception {

  // Drop the events dataset
  boolean success = Datasets.delete("dataset:hive:/tmp/data/default/events");

  return success ? 0 : 1;
}
 
Developer: kite-sdk, Project: kite-examples, Lines: 9, Source: DeleteDataset.java

Example 15: teardown

import org.kitesdk.data.Datasets; // import the package/class this method depends on
@After
public void teardown() {
  Datasets.delete(FILE_DATASET_URI);
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 5, Source: TestDatasetSink.java


Note: The org.kitesdk.data.Datasets.delete method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce without permission.