

Java FileStatus.getOwner Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileStatus.getOwner. If you are wondering what FileStatus.getOwner does, how to call it, or where it is used, the curated method examples below may help. You can also explore further usage of the enclosing class, org.apache.hadoop.fs.FileStatus.


The following presents 15 code examples of the FileStatus.getOwner method, sorted by popularity by default.
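Before the project examples, a minimal self-contained sketch of the call itself may help orient the reader. The class name, path, and default Configuration below are illustrative assumptions, not code from any of the projects that follow:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileOwnerSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.txt"); // hypothetical path for illustration
        FileStatus status = fs.getFileStatus(path);
        // getOwner() returns the user name that owns the file; the group and
        // permission accessors used throughout the examples below sit alongside it.
        System.out.println("owner: " + status.getOwner());
        System.out.println("group: " + status.getGroup());
        System.out.println("permission: " + status.getPermission());
    }
}

Most of the examples below follow the same pattern: fetch a FileStatus via FileSystem.getFileStatus, then read getOwner(), getGroup(), and getPermission() together.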

Example 1: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path path) throws IOException {
    DatasetJsonRecord record = null;
    if (!fs.exists(path))
        LOG.error(" File Path: " + path.toUri().getPath() + " is not exist in HDFS");
    else {
        try {
            LOG.info("xmlfileanalyzer start parse xml schema, path is {}" , path.toUri().getPath());
            startParseXML(path);
            FileStatus status = fs.getFileStatus(path);
            // escape backslashes: replace each "\" with "\\"
            String schemaString = getXMLSchema().replace("\\", "\\\\");
            LOG.info("xml file schemaString is {} " , schemaString);
            String storage = STORAGE_TYPE;
            String abstractPath = path.toUri().getPath();
            String codec = "xml.codec";
            record = new DatasetJsonRecord(schemaString, abstractPath, status.getModificationTime(), status.getOwner(), status.getGroup(),
                    status.getPermission().toString(), codec, storage, "");
        } catch (Exception e) {
            LOG.error("path : {} content " + " is not XML File format content  ",path.toUri().getPath());
            LOG.info(e.getStackTrace().toString());
        }

    }
    return record;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 27, Source: XMLFileAnalyzer.java

Example 2: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path path) throws IOException {
    DatasetJsonRecord record = null;
    if (!fs.exists(path))
        LOG.error("file path : {} not in hdfs", path);
    else {
        try {
            ParquetMetadata readFooter = ParquetFileReader.readFooter(fs.getConf(), path, ParquetMetadataConverter.NO_FILTER);
            Map<String, String> schema = readFooter.getFileMetaData().getKeyValueMetaData();
            String allFields = schema.get("org.apache.spark.sql.parquet.row.metadata");
            FileStatus status = fs.getFileStatus(path);
            String storage = STORAGE_TYPE;
            String abstractPath = path.toUri().getPath();
            String codec = "parquet.codec";
            record = new DatasetJsonRecord(allFields, abstractPath, status.getModificationTime(), status.getOwner(), status.getGroup(),
                    status.getPermission().toString(), codec, storage, "");
            LOG.info("parquetfileanalyzer parse path :{},schema is {}", path.toUri().getPath(), record.toCsvString());

        } catch (Exception e) {
            LOG.error("path : {} content " + " is not Parquet File format content  ", path.toUri().getPath());
            LOG.info(e.getStackTrace().toString());
        }
    }
    return record;

}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 27, Source: ParquetFileAnalyzer.java

Example 3: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath)
        throws IOException {
    DatasetJsonRecord datasetJsonRecord = null;
    try {
        Reader orcReader = OrcFile.createReader(fs, targetFilePath);
        String codec = String.valueOf(orcReader.getCompression());
        String schemaString = orcReader.getObjectInspector().getTypeName();
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        FileStatus fstat = fs.getFileStatus(targetFilePath);
        datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
    } catch (Exception e) {
        LOG.error("path : {} content " + " is not ORC File format content  ",targetFilePath.toUri().getPath());
        LOG.info(e.getStackTrace().toString());
    }

    return datasetJsonRecord;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 22, Source: OrcFileAnalyzer.java

Example 4: makeTestFile

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@BeforeClass
public static void makeTestFile() throws Exception {
  Configuration conf = new Configuration();
  fs = FileSystem.getLocal(conf).getRaw();
  testFilePathIs =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "1")).toUri().getRawPath());
  testFilePathRaf =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "2")).toUri().getRawPath());
  testFilePathFadis =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "3")).toUri().getRawPath());
  for (File f : new File[] { testFilePathIs, testFilePathRaf,
      testFilePathFadis }) {
    FileOutputStream fos = new FileOutputStream(f);
    fos.write("hello".getBytes("UTF-8"));
    fos.close();
  }

  FileStatus stat = fs.getFileStatus(
      new Path(testFilePathIs.toString()));
  // The real owner and real group are the same for all three files.
  realOwner = stat.getOwner();
  realGroup = stat.getGroup();
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 27, Source: TestSecureIOUtils.java

Example 5: cleanUpFilesPerUserDir

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void cleanUpFilesPerUserDir(FileContext lfs, DeletionService del,
    Path userDirPath) throws IOException {
  RemoteIterator<FileStatus> userDirStatus = lfs.listStatus(userDirPath);
  FileDeletionTask dependentDeletionTask =
      del.createFileDeletionTask(null, userDirPath, new Path[] {});
  if (userDirStatus != null && userDirStatus.hasNext()) {
    List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>();
    while (userDirStatus.hasNext()) {
      FileStatus status = userDirStatus.next();
      String owner = status.getOwner();
      FileDeletionTask deletionTask =
          del.createFileDeletionTask(owner, null,
            new Path[] { status.getPath() });
      deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
      deletionTasks.add(deletionTask);
    }
    for (FileDeletionTask task : deletionTasks) {
      del.scheduleFileDeletionTask(task);
    }
  } else {
    del.scheduleFileDeletionTask(dependentDeletionTask);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: ResourceLocalizationService.java

Example 6: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path path) throws IOException {
    DatasetJsonRecord record = null;
    if (!fs.exists(path))
        LOG.error("file path : {} not in hdfs", path);
    else {
        try {
            RCFile.Reader reader = new RCFile.Reader(fs, path, fs.getConf());
            Map<Text, Text> meta = reader.getMetadata().getMetadata();
            /** rcfile column number */
            int columnNumber = Integer.parseInt(meta.get(new Text(COLUMN_NUMBER_KEY)).toString());
            FileStatus status = fs.getFileStatus(path);
            String schemaString = getRCFileSchema(columnNumber);
            String storage = STORAGE_TYPE;
            String abstractPath = path.toUri().getPath();
            String codec = "rc.codec";
            record = new DatasetJsonRecord(schemaString, abstractPath, status.getModificationTime(), status.getOwner(), status.getGroup(),
                    status.getPermission().toString(), codec, storage, "");
            LOG.info("rc file : {} schema is {}", path.toUri().getPath(), schemaString);
        } catch (Exception e) {
            LOG.error("path : {} content " + " is not RC File format content  ", path.toUri().getPath());
            LOG.info(e.getStackTrace().toString());
        }
    }

    return record;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 28, Source: RCFileAnalyzer.java

Example 7: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath)
        throws IOException {
    LOG.info("avro file path : " + targetFilePath.toUri().getPath());
    try {
        SeekableInput sin = new FsInput(targetFilePath, fs.getConf());
        DataFileReader<GenericRecord> reader =
                new DataFileReader<GenericRecord>(sin, new GenericDatumReader<GenericRecord>());
        String codec = reader.getMetaString("avro.codec");
        long record_count = reader.getBlockCount();

        String schemaString = reader.getSchema().toString();
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        System.out.println("the schema string is: " + schemaString);
        System.out.println("the abstract path is: " + abstractPath);
      
        FileStatus fstat = fs.getFileStatus(targetFilePath);
        DatasetJsonRecord datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
        reader.close();
        sin.close();
        LOG.info("Avro file datasetjsonrecorc get success, it is : " + datasetJsonRecord);
        return datasetJsonRecord;
    } catch (Exception e) {
        LOG.info("AvroAnalyzer get datasetjson failure, and exception is " + e.getMessage());
        return null;
    }

}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 32, Source: AvroFileAnalyzer.java

Example 8: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath)
        throws IOException {
    String filePath = targetFilePath.toUri().getPath();
    System.out.println("[getSchema] HiveExportFile path : " + filePath);
    // give it a try.
    if (!filePath.contains("000000_0")) return null;
    // if (!filePath.equalsIgnoreCase("/project/T405/out/000000_0")) return null;

    InputStream inputStream = fs.open(targetFilePath);
    BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
    String str;
    int columnNum = 0;
    while((str = bufferedReader.readLine()) != null) {
        columnNum = str.split(delemiter).length;
        System.out.println(String.format("the first line is: %s", str));
        break;
    }
    // debug.
    System.out.println("the number of column is: " + columnNum);

    inputStream.close();
    bufferedReader.close();
    // a single column after splitting means the delimiter did not match, so the format is unmatched.
    if (columnNum == 1) return null;

    String codec = "plain.codec";
    String schemaString = "{\"fields\": [{\"name\": \"name\", \"type\": \"string\"}, {\"name\": \"age\", \"type\": \"int\"}], \"name\": \"Result\", \"namespace\": \"com.tencent.thomas\", \"type\": \"record\"}";
    String storage = STORAGE_TYPE;
    String abstractPath = targetFilePath.toUri().getPath();

    System.out.println("current file is: " + filePath);
    FileStatus fstat = fs.getFileStatus(targetFilePath);
    DatasetJsonRecord datasetJsonRecord =
            new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                    fstat.getPermission().toString(), codec, storage, "");
    return datasetJsonRecord;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 39, Source: HiveExportFileAnalyzer.java

Example 9: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path path) throws IOException {
    DatasetJsonRecord record = null;
    if (!fs.exists(path))
        LOG.error("sequencefileanalyzer file : " + path.toUri().getPath() + " is not exist on hdfs");
    else {
        try {
            LOG.info("sequencefileanalyzer start parse schema for  file path : {}", path.toUri().getPath());
            SequenceFile.Reader reader = new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(path));
            String keyName = "Key";
            String keyType = getWritableType(reader.getKeyClassName());
            String valueName = "Value";
            String valueType = getWritableType(reader.getValueClassName());
            FileStatus status = fs.getFileStatus(path);
            String storage = STORAGE_TYPE;
            String abstractPath = path.toUri().getPath();
            String codec = "sequence.codec";
            String schemaString = "{\"fields\": [{\"name\": \"" + keyName + "\", \"type\": \"" + keyType + "\"}, {\"name\": \"" + valueName + "\", \"type\": \"" + valueType + "\"}], \"name\": \"Result\", \"namespace\": \"com.tencent.lake\", \"type\": \"record\"}";

            record = new DatasetJsonRecord(schemaString, abstractPath, status.getModificationTime(), status.getOwner(), status.getGroup(),
                    status.getPermission().toString(), codec, storage, "");
            LOG.info("sequencefileanalyzer parse path :{},schema is {}", path.toUri().getPath(), record.toCsvString());

        } catch (Exception e) {
            LOG.error("path : {} content " + " is not Sequence File format content  ",path.toUri().getPath());
            LOG.info(e.getStackTrace().toString());
        }

    }
    return record;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 32, Source: SequenceFileAnalyzer.java

Example 10: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public DatasetJsonRecord getSchema(Path targetFilePath) throws IOException {
    DatasetJsonRecord datasetJsonRecord = null;
    try {
        StringBuilder JsonObjectList = new StringBuilder();
        List lsList = this.getLineToData(targetFilePath, 1);
        String[] lsString = (String[]) lsList.get(0);
        for (String realName : lsString) {
            if (realName.indexOf("\"")>=0){
                JsonObjectList.append("{\"name\": " + realName + ", \"type\": \"string\"},");
            }else {
                JsonObjectList.append("{\"name\": \"" + realName + "\", \"type\": \"string\"},");
            }
        }
        JsonObjectList.deleteCharAt(JsonObjectList.length() - 1);
        String schemaString = "{\"fields\":[" + JsonObjectList + "],\"name\": \"Result\", \"namespace\": \"com.tencent.thomas\", \"type\": \"record\"}";
        String codec = "csv.codec";
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        FileStatus fstat = fs.getFileLinkStatus(targetFilePath);
        datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
        LOG.info("csv schma get success , it is {}", datasetJsonRecord.toCsvString());
    } catch (Exception e) {
        LOG.error("path : {} content " + " is not CSV File format content  ",targetFilePath.toUri().getPath());
        LOG.info(e.getStackTrace().toString());
    }
    return datasetJsonRecord;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 30, Source: CSVFileAnalyzer.java

Example 11: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath) throws IOException {
    StringBuilder JsonObjectList = new StringBuilder();
    DatasetJsonRecord datasetJsonRecord = null;
    try {
        for (String realName : this.json2Array(getJsonObject(targetFilePath), "schema")) {
            if (realName.charAt(0) == '$') {
                JsonObjectList.append("{\"name\": \"" + realName.substring(1, realName.length()) + "\", \"type\": \"int\"},");
            } else {
                JsonObjectList.append("{\"name\": \"" + realName + "\", \"type\": \"string\"},");
            }
        }
        JsonObjectList.deleteCharAt(JsonObjectList.length() - 1);
        String schemaString = "{\"fields\":[" + JsonObjectList + "],\"name\": \"Result\", \"namespace\": \"com.tencent.thomas\", \"type\": \"record\"}";
        String codec = "json.codec";
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        FileStatus fstat = fs.getFileLinkStatus(targetFilePath);

        datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
    } catch (Exception e) {
        LOG.error("path : {} content " + " is not JSON File format content  ",targetFilePath.toUri().getPath());
        LOG.info(e.getStackTrace().toString());
    }

    return datasetJsonRecord;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 30, Source: JSONFileAnalyzer.java

Example 12: transform

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static FileStatus transform(FileStatus input, String bucket) {
  String relativePath = removeLeadingSlash(Path.getPathWithoutSchemeAndAuthority(input.getPath()).toString());
  Path bucketPath  = new Path(Path.SEPARATOR + bucket);
  Path fullPath = Strings.isEmpty(relativePath) ? bucketPath : new Path(bucketPath, relativePath);
  return new FileStatus(input.getLen(),
          input.isDirectory(),
          input.getReplication(),
          input.getBlockSize(),
          input.getModificationTime(),
          input.getAccessTime(),
          input.getPermission(),
          input.getOwner(),
          input.getGroup(),
          fullPath);
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 16, Source: S3FileSystem.java

Example 13: obtainLogDirOwner

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Obtain the owner of the log dir. This is 
 * determined by checking the job's log directory.
 */
static String obtainLogDirOwner(TaskAttemptID taskid) throws IOException {
  Configuration conf = new Configuration();
  FileSystem raw = FileSystem.getLocal(conf).getRaw();
  Path jobLogDir = new Path(getJobDir(taskid.getJobID()).getAbsolutePath());
  FileStatus jobStat = raw.getFileStatus(jobLogDir);
  return jobStat.getOwner();
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TaskLog.java

Example 14: getStagingDir

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Initializes the staging directory and returns the path. It also
 * keeps track of all necessary ownership and permissions
 * @param cluster
 * @param conf
 */
public static Path getStagingDir(Cluster cluster, Configuration conf) 
throws IOException,InterruptedException {
  Path stagingArea = cluster.getStagingAreaDir();
  FileSystem fs = stagingArea.getFileSystem(conf);
  String realUser;
  String currentUser;
  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  realUser = ugi.getShortUserName();
  currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
  if (fs.exists(stagingArea)) {
    FileStatus fsStatus = fs.getFileStatus(stagingArea);
    String owner = fsStatus.getOwner();
    if (!(owner.equals(currentUser) || owner.equals(realUser))) {
       throw new IOException("The ownership on the staging directory " +
                    stagingArea + " is not as expected. " +
                    "It is owned by " + owner + ". The directory must " +
                    "be owned by the submitter " + currentUser + " or " +
                    "by " + realUser);
    }
    if (!fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
      LOG.info("Permissions on staging directory " + stagingArea + " are " +
        "incorrect: " + fsStatus.getPermission() + ". Fixing permissions " +
        "to correct value " + JOB_DIR_PERMISSION);
      fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
    }
  } else {
    fs.mkdirs(stagingArea, 
        new FsPermission(JOB_DIR_PERMISSION));
  }
  return stagingArea;
}
 
Developer ID: naver, Project: hadoop, Lines: 38, Source: JobSubmissionFiles.java

Example 15: IndexDirectory

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public IndexDirectory(FileStatus fileStatus) {
    this.path = fileStatus.getPath().toString().replaceFirst("hdfs:\\/\\/.+:\\d{4,6}", ""); // strip the "hdfs://host:port" scheme and authority, keeping only the path
    this.type = IndexEntry.TYPE_DIRECTORY;
    this.time = fileStatus.getModificationTime();
    this.rights = Short.toString(fileStatus.getPermission().toShort());
    this.user = fileStatus.getOwner();
    this.group = fileStatus.getGroup();
    this.name = fileStatus.getPath().getName();
}
 
Developer ID: trenner, Project: ahar, Lines: 10, Source: IndexDirectory.java


Note: The org.apache.hadoop.fs.FileStatus.getOwner method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use must follow each project's license. Do not reproduce without permission.