

Java RunningJob.isComplete Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.RunningJob.isComplete. If you are wondering what RunningJob.isComplete does, how to call it, or where to find real-world examples of it, the curated method examples below may help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.mapred.RunningJob.


The sections below show 11 code examples of the RunningJob.isComplete method, sorted by popularity by default.
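Most of the examples share the same pattern: submit a job through JobClient, poll RunningJob.isComplete() in a loop until it returns true, then call isSuccessful() to distinguish success from failure. The following is a minimal sketch of that pattern, assuming a fully configured JobConf; the class and method names WaitForJob and submitAndWait are illustrative only and do not appear in the examples below.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class WaitForJob {
  /**
   * Submits the configured job and blocks until it finishes.
   * @return true if the job completed successfully
   */
  public static boolean submitAndWait(JobConf conf)
      throws IOException, InterruptedException {
    JobClient jc = new JobClient(conf);
    RunningJob running = jc.submitJob(conf);
    // isComplete() becomes true for both successful and failed jobs,
    // so poll it first and then ask isSuccessful() for the outcome.
    while (!running.isComplete()) {
      Thread.sleep(1000);
    }
    return running.isSuccessful();
  }
}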

Example 1: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files 
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 67, Source: TestKeyFieldBasedComparator.java

Example 2: runJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
 
Developer ID: naver, Project: hadoop, Lines: 33, Source: DataJoinJob.java

Example 3: testFailoverWhileRunningJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
@Test(timeout=60000)
public void testFailoverWhileRunningJob() throws Exception {
  LOG.info("Running job failover test");

  // Inspired by TestRecoveryManager#testJobResubmission
  
  FileUtil.fullyDelete(new File("/tmp/tst"));
  
  // start a job on jt1
  JobConf job1 = new JobConf(conf);
  String signalFile = new Path(TEST_DIR, "signal").toString();
  UtilsForTests.configureWaitingJobConf(job1, new Path(TEST_DIR, "input"),
      new Path(TEST_DIR, "output3"), 2, 0, "test-resubmission", signalFile,
      signalFile);
  JobClient jc = new JobClient(job1);
  RunningJob rJob1 = jc.submitJob(job1);
  while (rJob1.mapProgress() < 0.5f) {
    LOG.info("Waiting for job " + rJob1.getID() + " to be 50% done: " +
        rJob1.mapProgress());
    UtilsForTests.waitFor(500);
  }
  LOG.info("Waiting for job " + rJob1.getID() + " to be 50% done: " +
      rJob1.mapProgress());
  
  // Shut the first JT down, causing automatic failover
  LOG.info("Shutting down jt1");
  cluster.shutdownJobTracker(0);
  
  // allow job to complete
  FileSystem fs = FileSystem.getLocal(conf);
  fs.create(new Path(TEST_DIR, "signal"));
  while (!rJob1.isComplete()) {
    LOG.info("Waiting for job " + rJob1.getID() + " to be successful: " +
        rJob1.mapProgress());
    UtilsForTests.waitFor(500);
  }
  assertTrue("Job should be successful", rJob1.isSuccessful());
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 39, Source: TestMRZKFailoverController.java

Example 4: verifyCompletedJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
/**
 * Verification API to check if the job completion state is correct. <br/>
 * 
 * @param id id of the job to be verified.
 */

public void verifyCompletedJob(JobID id) throws Exception{
  RunningJob rJob = getClient().getJob(
      org.apache.hadoop.mapred.JobID.downgrade(id));
  while(!rJob.isComplete()) {
    LOG.info("waiting for job :" + id + " to retire");
    Thread.sleep(1000);
    rJob = getClient().getJob(
        org.apache.hadoop.mapred.JobID.downgrade(id));
  }
  verifyJobDetails(id);
  verifyJobHistory(id);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source: JTClient.java

Example 5: runProgram

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("testing/bin/application");
  JobConf job = null;
  if(conf == null) {
    job = mr.createJobConf();
  }else {
    job = new JobConf(conf);
  } 
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);
      
      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());
    
    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p:FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(MapReduceTestUtil.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong", 
               expectedResults.length, results.size());
  for(int i=0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 63, Source: TestPipes.java

Example 6: runProgram

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("testing/bin/application");
  JobConf job = null;
  if(conf == null) {
    job = mr.createJobConf();
  }else {
    job = new JobConf(conf);
  } 
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);
      
      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());
    
    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p:FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(MapReduceTestUtil.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong", 
               expectedResults.length, results.size());
  for(int i=0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 62, Source: TestPipes.java

Example 7: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set("map.output.key.field.separator", " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files 
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines (both the lines must end up in the same
    //reducer since the partitioner takes the same key spec for all
    //lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 69, Source: TestKeyFieldBasedComparator.java

Example 8: runProgram

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("/testing/bin/application");
  JobConf job = null;
  if(conf == null) {
    job = mr.createJobConf();
  }else {
    job = new JobConf(conf);
  } 
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);
      
      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());
    
    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p:FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong", 
               expectedResults.length, results.size());
  for(int i=0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 62, Source: TestPipes.java

Example 9: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files 
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines (both the lines must end up in the same
    //reducer since the partitioner takes the same key spec for all
    //lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 69, Source: TestKeyFieldBasedComparator.java

Example 10: run

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
private void run(boolean ioEx, boolean rtEx) throws Exception {
  Path inDir = new Path("testing/mt/input");
  Path outDir = new Path("testing/mt/output");

  // Hack for local FS that does not have the concept of a 'mounting point'
  if (isLocalFS()) {
    String localPathRoot = System.getProperty("test.build.data", "/tmp")
            .replace(' ', '+');
    inDir = new Path(localPathRoot, inDir);
    outDir = new Path(localPathRoot, outDir);
  }


  JobConf conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);

  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
    file.writeBytes("a\nb\n\nc\nd\ne");
    file.close();
  }

  conf.setJobName("mt");
  conf.setInputFormat(TextInputFormat.class);

  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  conf.setMapOutputKeyClass(LongWritable.class);
  conf.setMapOutputValueClass(Text.class);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  conf.setMapperClass(IDMap.class);
  conf.setReducerClass(IDReduce.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);

  conf.setMapRunnerClass(MultithreadedMapRunner.class);
  
  conf.setInt(MultithreadedMapper.NUM_THREADS, 2);

  if (ioEx) {
    conf.setBoolean("multithreaded.ioException", true);
  }
  if (rtEx) {
    conf.setBoolean("multithreaded.runtimeException", true);
  }
  JobClient jc = new JobClient(conf);
  RunningJob job = jc.submitJob(conf);
  while (!job.isComplete()) {
    Thread.sleep(100);
  }

  if (job.isSuccessful()) {
    assertFalse(ioEx || rtEx);
  }
  else {
    assertTrue(ioEx || rtEx);
  }

}
 
Developer ID: hopshadoop, Project: hops, Lines: 70, Source: TestMultithreadedMapRunner.java

Example 11: updateProgress

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
@Override
public void updateProgress() {

	for (HadoopJob job : jobs.values()) {

		int state = WAIT;

		if (job != null) {

			String hadoopJobId = job.getJobId();

			if (hadoopJobId != null) {

				RunningJob hadoopJob = HadoopUtil.getInstance().getJob(
						hadoopJobId);
				try {

					if (hadoopJob != null) {

						if (hadoopJob.isComplete()) {

							if (hadoopJob.isSuccessful()) {
								state = OK;
							} else {
								state = FAILED;
							}

						} else {

							if (hadoopJob.getJobStatus().mapProgress() > 0) {

								state = RUNNING;

							} else {
								state = WAIT;
							}

						}

					} else {
						state = WAIT;
					}
				} catch (IOException e) {

					state = WAIT;

				}

			}

		}

		states.put(job, state);

	}

}
 
Developer ID: genepi, Project: imputationserver, Lines: 58, Source: ParallelHadoopJobStep.java


Note: The org.apache.hadoop.mapred.RunningJob.isComplete method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.