

Java ImportTool Class Code Examples

This article collects typical usage examples of the Java class com.cloudera.sqoop.tool.ImportTool, drawn from open-source projects. If you are unsure what ImportTool is for or how to use it, the curated examples below should help.


The ImportTool class belongs to the com.cloudera.sqoop.tool package. The sections below present 15 code examples of the class, ordered by popularity.
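Before walking through the individual examples, here is a minimal sketch of the pattern most of them share: construct an ImportTool, wrap it in a Sqoop driver together with a Configuration and SqoopOptions, and hand the pair to Sqoop.runSqoop along with the command-line arguments. The connect string, table name, and class name below are illustrative placeholders, not taken from any of the examples.

import com.cloudera.sqoop.Sqoop;
import com.cloudera.sqoop.SqoopOptions;
import com.cloudera.sqoop.tool.ImportTool;
import org.apache.hadoop.conf.Configuration;

public class ImportToolUsageSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    SqoopOptions opts = new SqoopOptions(conf);

    // Wire an ImportTool into the Sqoop driver; runSqoop parses the
    // argument array into the options and returns the tool's exit code.
    Sqoop importer = new Sqoop(new ImportTool(), conf, opts);
    int ret = Sqoop.runSqoop(importer, new String[] {
        "--connect", "jdbc:hsqldb:mem:sketch", // placeholder connect string
        "--table", "EXAMPLE_TABLE",            // placeholder table name
    });
    System.exit(ret);
  }
}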

Example 1: runSqoopImport

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
private void runSqoopImport(String[] importCols) {
    Configuration conf = getConf();
    SqoopOptions opts = getSqoopOptions(conf);
    String username = MSSQLTestUtils.getDBUserName();
    String password = MSSQLTestUtils.getDBPassWord();
    opts.setUsername(username);
    opts.setPassword(password);

    // run the tool through the normal entry-point.
    int ret;
    try {
      Sqoop importer = new Sqoop(new ImportTool(), conf, opts);
      ret = Sqoop.runSqoop(importer, getArgv(true, importCols, conf));
    } catch (Exception e) {
      LOG.error("Got exception running Sqoop: " + e.toString());
      throw new RuntimeException(e);
    }

    // expect a successful return.
    assertEquals("Failure during job", 0, ret);
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 22 | Source: SQLServerDatatypeImportDelimitedFileManualTest.java

Example 2: importData

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
private void importData(String targetDir, SqoopOptions.FileLayout fileLayout) {
  SqoopOptions options = getSqoopOptions(newConf());
  options.setTableName(TABLE_NAME);
  options.setNumMappers(1);
  options.setFileLayout(fileLayout);
  options.setDeleteMode(true);

  Path warehouse = new Path(BaseSqoopTestCase.LOCAL_WAREHOUSE_DIR);
  options.setTargetDir(new Path(warehouse, targetDir).toString());

  ImportTool importTool = new ImportTool();
  Sqoop importer = new Sqoop(importTool, options.getConf(), options);
  int ret = Sqoop.runSqoop(importer, new String[0]);
  if (0 != ret) {
    fail("Initial import failed with exit code " + ret);
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 19 | Source: TestMerge.java
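Note: setDeleteMode(true) appears to correspond to Sqoop's --delete-target-dir option, so each import first removes any existing target directory; this keeps the repeated imports in TestMerge from failing on a pre-existing output path.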

Example 3: testCreateOverwriteHiveImportAsParquet

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
/**
 * Test that table is created in hive and replaces the existing table if
 * any.
 */
@Test
public void testCreateOverwriteHiveImportAsParquet() throws IOException {
  final String TABLE_NAME = "CREATE_OVERWRITE_HIVE_IMPORT_AS_PARQUET";
  setCurTableName(TABLE_NAME);
  setNumCols(3);
  String [] types = { "VARCHAR(32)", "INTEGER", "CHAR(64)" };
  String [] vals = { "'test'", "42", "'somestring'" };
  String [] extraArgs = {"--as-parquetfile"};
  ImportTool tool = new ImportTool();

  runImportTest(TABLE_NAME, types, vals, "", getArgv(false, extraArgs), tool);
  verifyHiveDataset(TABLE_NAME, new Object[][]{{"test", 42, "somestring"}});

  String [] valsToOverwrite = { "'test2'", "24", "'somestring2'" };
  String [] extraArgsForOverwrite = {"--as-parquetfile", "--hive-overwrite"};
  runImportTest(TABLE_NAME, types, valsToOverwrite, "",
      getArgv(false, extraArgsForOverwrite), tool);
  verifyHiveDataset(TABLE_NAME, new Object[][] {{"test2", 24, "somestring2"}});
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 24 | Source: TestHiveImport.java

Example 4: testAppendHiveImportAsParquet

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
/**
 * Test that records are appended to an existing table.
 */
@Test
public void testAppendHiveImportAsParquet() throws IOException {
  final String TABLE_NAME = "APPEND_HIVE_IMPORT_AS_PARQUET";
  setCurTableName(TABLE_NAME);
  setNumCols(3);
  String [] types = { "VARCHAR(32)", "INTEGER", "CHAR(64)" };
  String [] vals = { "'test'", "42", "'somestring'" };
  String [] extraArgs = {"--as-parquetfile"};
  String [] args = getArgv(false, extraArgs);
  ImportTool tool = new ImportTool();

  runImportTest(TABLE_NAME, types, vals, "", args, tool);
  verifyHiveDataset(TABLE_NAME, new Object[][]{{"test", 42, "somestring"}});

  String [] valsToAppend = { "'test2'", "4242", "'somestring2'" };
  runImportTest(TABLE_NAME, types, valsToAppend, "", args, tool);
  verifyHiveDataset(TABLE_NAME, new Object[][] {
      {"test2", 4242, "somestring2"}, {"test", 42, "somestring"}});
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 23 | Source: TestHiveImport.java
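Read together, Examples 3 and 4 illustrate the two re-import behaviors for Parquet Hive tables: with --hive-overwrite the second import replaces the existing rows, while without it the new rows are appended to the table.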

Example 5: testHiveExitFails

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
/** If bin/hive returns an error exit status, we should get an IOException. */
@Test
public void testHiveExitFails() {
  // The expected script is different from the one that this run would
  // generate, so we expect an IOException.
  final String TABLE_NAME = "FAILING_HIVE_IMPORT";
  setCurTableName(TABLE_NAME);
  setNumCols(2);
  String [] types = { "NUMERIC", "CHAR(64)" };
  String [] vals = { "3.14159", "'foo'" };
  try {
    runImportTest(TABLE_NAME, types, vals, "failingImport.q",
        getArgv(false, null), new ImportTool());
    // If we get here, then the run succeeded -- which is incorrect.
    fail("FAILING_HIVE_IMPORT test should have thrown IOException");
  } catch (IOException ioe) {
    // expected; ok.
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 20 | Source: TestHiveImport.java

Example 6: testHiveDropAndReplaceOptionValidation

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
/**
 * Test hive drop and replace option validation.
 */
@Test
public void testHiveDropAndReplaceOptionValidation() throws ParseException {
  LOG.info("Testing conflicting Hive delimiter drop/replace options");

  setNumCols(3);
  String[] moreArgs = { "--"+BaseSqoopTool.HIVE_DELIMS_REPLACEMENT_ARG, " ",
    "--"+BaseSqoopTool.HIVE_DROP_DELIMS_ARG, };

  ImportTool tool = new ImportTool();
  try {
    tool.validateOptions(tool.parseArguments(getArgv(false, moreArgs), null,
        null, true));
    fail("Expected InvalidOptionsException");
  } catch (InvalidOptionsException ex) {
    /* success */
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 21 | Source: TestHiveImport.java
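The two options exercised here, --hive-delims-replacement and --hive-drop-import-delims, are mutually exclusive ways of handling Hive delimiter characters inside field values (replace them with a given string versus strip them), which is why passing both must raise InvalidOptionsException.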

Example 7: testImportHiveWithPartitions

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
/**
 * Test Hive import into a partitioned table.
 */
@Test
public void testImportHiveWithPartitions() throws IOException,
    InterruptedException {
  final String TABLE_NAME = "PARTITION_HIVE_IMPORT";

  LOG.info("Doing import of single row into PARTITION_HIVE_IMPORT table");
  setCurTableName(TABLE_NAME);
  setNumCols(3);
  String[] types = { "VARCHAR(32)", "INTEGER", "CHAR(64)", };
  String[] vals = { "'whoop'", "42", "'I am a row in a partition'", };
  String[] moreArgs = { "--" + BaseSqoopTool.HIVE_PARTITION_KEY_ARG, "ds",
      "--" + BaseSqoopTool.HIVE_PARTITION_VALUE_ARG, "20110413", };

  runImportTest(TABLE_NAME, types, vals, "partitionImport.q",
      getArgv(false, moreArgs), new ImportTool());
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 20 | Source: TestHiveImport.java

Example 8: testPartitions

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
public void testPartitions() throws Exception {
  String[] args = {
      "--hive-partition-key", "ds",
      "--hive-partition-value", "20110413",
  };
  Configuration conf = new Configuration();
  SqoopOptions options =
    new ImportTool().parseArguments(args, null, null, false);
  TableDefWriter writer = new TableDefWriter(options,
      null, "inputTable", "outputTable", conf, false);

  Map<String, Integer> colTypes = new SqlTypeMap<String, Integer>();
  writer.setColumnTypes(colTypes);

  String createTable = writer.getCreateTableStmt();
  String loadData = writer.getLoadDataStmt();

  assertNotNull(createTable);
  assertNotNull(loadData);
  assertEquals("CREATE TABLE IF NOT EXISTS `outputTable` ( ) "
      + "PARTITIONED BY (ds STRING) "
      + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\054' "
      + "LINES TERMINATED BY '\\012' STORED AS TEXTFILE", createTable);
  assertTrue(loadData.endsWith(" PARTITION (ds='20110413')"));
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 26 | Source: TestTableDefWriter.java
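The delimiters in the expected statement, '\054' and '\012', are octal escapes for the comma (ASCII 44) and newline (ASCII 10) characters that the writer emits as the field and line terminators.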

Example 9: testUserMapping

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
public void testUserMapping() throws Exception {
  String[] args = {
      "--map-column-hive", "id=STRING,value=INTEGER",
  };
  Configuration conf = new Configuration();
  SqoopOptions options =
    new ImportTool().parseArguments(args, null, null, false);
  TableDefWriter writer = new TableDefWriter(options,
      null, HsqldbTestServer.getTableName(), "outputTable", conf, false);

  Map<String, Integer> colTypes = new SqlTypeMap<String, Integer>();
  colTypes.put("id", Types.INTEGER);
  colTypes.put("value", Types.VARCHAR);
  writer.setColumnTypes(colTypes);

  String createTable = writer.getCreateTableStmt();

  assertNotNull(createTable);

  assertTrue(createTable.contains("`id` STRING"));
  assertTrue(createTable.contains("`value` INTEGER"));

  assertFalse(createTable.contains("`id` INTEGER"));
  assertFalse(createTable.contains("`value` STRING"));
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 26 | Source: TestTableDefWriter.java
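The assertions confirm that --map-column-hive takes precedence over the default SQL-to-Hive type mapping: id is emitted as STRING and value as INTEGER even though their SQL types (INTEGER and VARCHAR) would normally map the other way around.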

Example 10: testUserMappingFailWhenCantBeApplied

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
public void testUserMappingFailWhenCantBeApplied() throws Exception {
  String[] args = {
      "--map-column-hive", "id=STRING,value=INTEGER",
  };
  Configuration conf = new Configuration();
  SqoopOptions options =
    new ImportTool().parseArguments(args, null, null, false);
  TableDefWriter writer = new TableDefWriter(options,
      null, HsqldbTestServer.getTableName(), "outputTable", conf, false);

  Map<String, Integer> colTypes = new SqlTypeMap<String, Integer>();
  colTypes.put("id", Types.INTEGER);
  writer.setColumnTypes(colTypes);

  try {
    String createTable = writer.getCreateTableStmt();
    fail("Expected failure on non applied mapping.");
  } catch(IllegalArgumentException iae) {
    // Expected, ok
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 22 | Source: TestTableDefWriter.java

Example 11: testHiveDatabase

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
public void testHiveDatabase() throws Exception {
  String[] args = {
      "--hive-database", "db",
  };
  Configuration conf = new Configuration();
  SqoopOptions options =
    new ImportTool().parseArguments(args, null, null, false);
  TableDefWriter writer = new TableDefWriter(options,
      null, HsqldbTestServer.getTableName(), "outputTable", conf, false);

  Map<String, Integer> colTypes = new SqlTypeMap<String, Integer>();
  writer.setColumnTypes(colTypes);

  String createTable = writer.getCreateTableStmt();
  assertNotNull(createTable);
  assertTrue(createTable.contains("`db`.`outputTable`"));

  String loadStmt = writer.getLoadDataStmt();
  assertNotNull(loadStmt);
  assertTrue(createTable.contains("`db`.`outputTable`"));
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 22 | Source: TestTableDefWriter.java

Example 12: runUncleanImport

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
/** The same as in ImportJobTestCase, but without removing the table dir. */
protected void runUncleanImport(String[] argv) throws IOException {
  // run the tool through the normal entry-point.
  int ret;
  try {
    Configuration conf = getConf();
    SqoopOptions opts = getSqoopOptions(conf);
    Sqoop sqoop = new Sqoop(new ImportTool(), conf, opts);
    ret = Sqoop.runSqoop(sqoop, argv);
  } catch (Exception e) {
    LOG.error("Got exception running Sqoop: " + e.toString());
    e.printStackTrace();
    ret = 1;
  }

  // expect a successful return.
  if (0 != ret) {
    throw new IOException("Failure during job; return status " + ret);
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 21 | Source: TestAppendUtils.java

Example 13: testFailedNoColumns

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
public void testFailedNoColumns() throws IOException {
  // Make sure that if a MapReduce job to do the import fails due
  // to an IOException, we tell the user about it.

  // Create a table to attempt to import.
  createTableForColType("VARCHAR(32)", "'meep'");

  Configuration conf = new Configuration();

  // Make the output dir exist so we know the job will fail via IOException.
  Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
  FileSystem fs = FileSystem.getLocal(conf);
  fs.mkdirs(outputPath);
  assertTrue(fs.exists(outputPath));

  String [] argv = getArgv(true, new String [] { "" }, conf);

  Sqoop importer = new Sqoop(new ImportTool());
  try {
    int ret = Sqoop.runSqoop(importer, argv);
    assertTrue("Expected job to fail due to no colnames.", 1==ret);
  } catch (Exception e) {
    // In debug mode, IOException is wrapped in RuntimeException.
    LOG.info("Got exceptional return (expected: ok). msg is: " + e);
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 27 | Source: TestImportJob.java

Example 14: testDuplicateColumns

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
public void testDuplicateColumns() throws IOException {
  // Make sure that an import with a duplicated column name fails and
  // that we tell the user about it.

  // Create a table to attempt to import.
  createTableForColType("VARCHAR(32)", "'meep'");

  Configuration conf = new Configuration();

  // Make the output dir exist so we know the job will fail via IOException.
  Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
  FileSystem fs = FileSystem.getLocal(conf);
  fs.mkdirs(outputPath);
  assertTrue(fs.exists(outputPath));

  String[] argv = getArgv(true, new String[] { "DATA_COL0,DATA_COL0" }, conf);

  Sqoop importer = new Sqoop(new ImportTool());
  try {
    int ret = Sqoop.runSqoop(importer, argv);
    assertTrue("Expected job to fail!", 1 == ret);
  } catch (Exception e) {
    // In debug mode, ImportException is wrapped in RuntimeException.
    LOG.info("Got exceptional return (expected: ok). msg is: " + e);
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 27 | Source: TestImportJob.java
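Examples 13 and 14 share the same negative-test pattern: pre-create the output directory so the import job cannot succeed, run it through Sqoop.runSqoop, and then accept either an exit code of 1 or an exception, since debug mode wraps the underlying failure in a RuntimeException.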

Example 15: runFailedGenerationTest

import com.cloudera.sqoop.tool.ImportTool; // import the required package/class
private void runFailedGenerationTest(String [] argv,
    String classNameToCheck) {
  File codeGenDirFile = new File(CODE_GEN_DIR);
  File classGenDirFile = new File(JAR_GEN_DIR);

  try {
    options = new ImportTool().parseArguments(argv,
        null, options, true);
  } catch (Exception e) {
    LOG.error("Could not parse options: " + e.toString());
  }

  CompilationManager compileMgr = new CompilationManager(options);
  ClassWriter writer = new ClassWriter(options, manager,
      HsqldbTestServer.getTableName(), compileMgr);

  try {
    writer.generate();
    compileMgr.compile();
    fail("ORM class file generation succeeded when it was expected to fail");
  } catch (Exception ioe) {
    LOG.error("Got Exception from ORM generation as expected : "
      + ioe.toString());
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 26 | Source: TestClassWriter.java


Note: The com.cloudera.sqoop.tool.ImportTool examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright; consult each project's license before distributing or using the code, and do not republish without permission.