

Java FileStatus.getModificationTime Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.fs.FileStatus.getModificationTime, collected from open-source code. If you are wondering what FileStatus.getModificationTime does, how to call it, or what real-world usages look like, the curated method examples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FileStatus.


The 15 code examples of FileStatus.getModificationTime below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
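Before turning to the examples, here is a minimal, self-contained sketch of the method's basic use. FileStatus.getModificationTime() returns the file's last modification time in milliseconds since the epoch, on the same clock basis as System.currentTimeMillis(), so subtracting the two gives a file's age. The class name and path below are illustrative assumptions and are not taken from any example on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModificationTimeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.txt"); // hypothetical path
    FileStatus status = fs.getFileStatus(path);
    // milliseconds since the epoch
    long modTime = status.getModificationTime();
    long ageMillis = System.currentTimeMillis() - modTime;
    System.out.println(path + " was last modified " + ageMillis + " ms ago");
  }
}

Several of the examples below (1, 5, and 9 in particular) build staleness or change-detection checks on exactly this comparison.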

Example 1: isLogDeletable

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public boolean isLogDeletable(FileStatus fStat) {
  long currentTime = EnvironmentEdgeManager.currentTime();
  long time = fStat.getModificationTime();
  long life = currentTime - time;
  
  if (LOG.isTraceEnabled()) {
    LOG.trace("Log life:" + life + ", ttl:" + ttl + ", current:" + currentTime + ", from: "
        + time);
  }
  if (life < 0) {
    LOG.warn("Found a log (" + fStat.getPath() + ") newer than current time (" + currentTime
        + " < " + time + "), probably a clock skew");
    return false;
  }
  return life > ttl;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 18, Source: TimeToLiveLogCleaner.java

Example 2: scanIfNeeded

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public synchronized void scanIfNeeded(FileStatus fs) {
  long newModTime = fs.getModificationTime();
  if (modTime != newModTime) {
    Path p = fs.getPath();
    try {
      scanIntermediateDirectory(p);
      //If scanning fails, we will scan again.  We assume the failure is
      // temporary.
      modTime = newModTime;
    } catch (IOException e) {
      LOG.error("Error while trying to scan the directory " + p, e);
    }
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Scan not needed of " + fs.getPath());
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 19, Source: HistoryFileManager.java

Example 3: apply

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public PathMetadata apply(@Nonnull Path location) {
  try {
    FileSystem fs = location.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(location);
    FileChecksum checksum = null;
    if (fileStatus.isFile()) {
      checksum = fs.getFileChecksum(location);
    }

    List<PathMetadata> childPathDescriptors = new ArrayList<>();
    if (fileStatus.isDirectory()) {
      FileStatus[] childStatuses = fs.listStatus(location);
      for (FileStatus childStatus : childStatuses) {
        childPathDescriptors.add(apply(childStatus.getPath()));
      }
    }

    return new PathMetadata(location, fileStatus.getModificationTime(), checksum, childPathDescriptors);

  } catch (IOException e) {
    throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e);
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 25, Source: PathToPathMetadata.java

Example 4: copy

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private Path copy(Path sCopy, Path dstdir) throws IOException {
  FileSystem sourceFs = sCopy.getFileSystem(conf);
  Path dCopy = new Path(dstdir, "tmp_"+sCopy.getName());
  FileStatus sStat = sourceFs.getFileStatus(sCopy);
  if (sStat.getModificationTime() != resource.getTimestamp()) {
    throw new IOException("Resource " + sCopy +
        " changed on src filesystem (expected " + resource.getTimestamp() +
        ", was " + sStat.getModificationTime());
  }
  if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
    if (!isPublic(sourceFs, sCopy, sStat, statCache)) {
      throw new IOException("Resource " + sCopy +
          " is not publicly accessable and as such cannot be part of the" +
          " public cache.");
    }
  }

  FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
      true, conf);
  return dCopy;
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: FSDownload.java

Example 5: isResourceEvictable

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public boolean isResourceEvictable(String key, FileStatus file) {
  synchronized (initialAppsLock) {
    if (initialApps.size() > 0) {
      return false;
    }
  }

  long staleTime =
      System.currentTimeMillis()
          - TimeUnit.MINUTES.toMillis(this.stalenessMinutes);
  long accessTime = getAccessTime(key);
  if (accessTime == -1) {
    // check modification time
    long modTime = file.getModificationTime();
    // if modification time is older than the store startup time, we need to
    // just use the store startup time as the last point of certainty
    long lastUse = modTime < this.startTime ? this.startTime : modTime;
    return lastUse < staleTime;
  } else {
    // check access time
    return accessTime < staleTime;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: InMemorySCMStore.java

Example 6: testSetTimes

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private void testSetTimes() throws Exception {
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    long at = status1.getAccessTime();
    long mt = status1.getModificationTime();

    fs = getHttpFSFileSystem();
    fs.setTimes(path, mt - 10, at - 20);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    status1 = fs.getFileStatus(path);
    fs.close();
    long atNew = status1.getAccessTime();
    long mtNew = status1.getModificationTime();
    Assert.assertEquals(mtNew, mt - 10);
    Assert.assertEquals(atNew, at - 20);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: BaseTestHttpFSWith.java

Example 7: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path path) throws IOException {
    DatasetJsonRecord record = null;
    if (!fs.exists(path))
        LOG.error("file path : {} not in hdfs", path);
    else {
        try {
            RCFile.Reader reader = new RCFile.Reader(fs, path, fs.getConf());
            Map<Text, Text> meta = reader.getMetadata().getMetadata();
            /** rcfile column number */
            int columnNumber = Integer.parseInt(meta.get(new Text(COLUMN_NUMBER_KEY)).toString());
            FileStatus status = fs.getFileStatus(path);
            String schemaString = getRCFileSchema(columnNumber);
            String storage = STORAGE_TYPE;
            String abstractPath = path.toUri().getPath();
            String codec = "rc.codec";
            record = new DatasetJsonRecord(schemaString, abstractPath, status.getModificationTime(), status.getOwner(), status.getGroup(),
                    status.getPermission().toString(), codec, storage, "");
            LOG.info("rc file : {} schema is {}", path.toUri().getPath(), schemaString);
        } catch (Exception e) {
            // log the full exception; e.getStackTrace().toString() would only print an array reference
            LOG.error("path : {} content is not RC File format content", path.toUri().getPath(), e);
        }
    }

    return record;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 28, Source: RCFileAnalyzer.java

Example 8: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath)
        throws IOException {
    LOG.info("avro file path : " + targetFilePath.toUri().getPath());
    try {
        SeekableInput sin = new FsInput(targetFilePath, fs.getConf());
        DataFileReader<GenericRecord> reader =
                new DataFileReader<GenericRecord>(sin, new GenericDatumReader<GenericRecord>());
        String codec = reader.getMetaString("avro.codec");
        long record_count = reader.getBlockCount();

        String schemaString = reader.getSchema().toString();
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        System.out.println("the schema string is: " + schemaString);
        System.out.println("the abstract path is: " + abstractPath);
      
        FileStatus fstat = fs.getFileStatus(targetFilePath);
        DatasetJsonRecord datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
        reader.close();
        sin.close();
        LOG.info("Avro file datasetjsonrecorc get success, it is : " + datasetJsonRecord);
        return datasetJsonRecord;
    } catch (Exception e) {
        LOG.info("AvroAnalyzer get datasetjson failure, and exception is " + e.getMessage());
        return null;
    }

}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 32, Source: AvroFileAnalyzer.java

Example 9: checkMultifileStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Given a file update key, determine whether the source system has changed since we last read the status.
 * @param fileUpdateKey
 * @return The type of status change.
 */
private UpdateStatus checkMultifileStatus(FileUpdateKey fileUpdateKey) {
  final List<FileSystemCachedEntity> cachedEntities = fileUpdateKey.getCachedEntitiesList();
  for (int i = 0; i < cachedEntities.size(); ++i) {
    final FileSystemCachedEntity cachedEntity = cachedEntities.get(i);
    final Path cachedEntityPath =  new Path(cachedEntity.getPath());
    try {

      final Optional<FileStatus> optionalStatus = fs.getFileStatusSafe(cachedEntityPath);
      if(!optionalStatus.isPresent()) {
        // if first entity (root) is missing then table is deleted
        if (i == 0) {
          return UpdateStatus.DELETED;
        }
        // missing directory force update for this dataset
        return UpdateStatus.CHANGED;
      }

      if(cachedEntity.getLastModificationTime() == 0) {
        // this system doesn't support modification times, no need to further probe (S3)
        return UpdateStatus.CHANGED;
      }

      final FileStatus updatedFileStatus = optionalStatus.get();
      final long updatedModificationTime = updatedFileStatus.getModificationTime();
      Preconditions.checkArgument(updatedFileStatus.isDirectory(), "fs based dataset update key must be composed of directories");
      if (cachedEntity.getLastModificationTime() < updatedModificationTime) {
        // the file/folder has been changed since our last check.
        return UpdateStatus.CHANGED;
      }

    } catch (IOException ioe) {
      // continue with other cached entities
      logger.error("Failed to get status for {}", cachedEntityPath, ioe);
      return UpdateStatus.CHANGED;
    }
  }

  return UpdateStatus.UNCHANGED;
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 45, Source: FileSystemStoragePlugin2.java

Example 10: getFileLists

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * @return <expected, gotten, backup>, where each is sorted
 */
private static List<List<String>> getFileLists(FileStatus[] previous, FileStatus[] archived) {
  List<List<String>> files = new ArrayList<List<String>>();

  // copy over the original files
  List<String> originalFileNames = convertToString(previous);
  files.add(originalFileNames);

  List<String> currentFiles = new ArrayList<String>(previous.length);
  List<FileStatus> backedupFiles = new ArrayList<FileStatus>(previous.length);
  for (FileStatus f : archived) {
    String name = f.getPath().getName();
    // if the file has been backed up
    if (name.contains(".")) {
      Path parent = f.getPath().getParent();
      String shortName = name.split("[.]")[0];
      Path modPath = new Path(parent, shortName);
      FileStatus file = new FileStatus(f.getLen(), f.isDirectory(), f.getReplication(),
          f.getBlockSize(), f.getModificationTime(), modPath);
      backedupFiles.add(file);
    } else {
      // otherwise, add it to the list to compare to the original store files
      currentFiles.add(name);
    }
  }

  files.add(currentFiles);
  files.add(convertToString(backedupFiles));
  return files;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: HFileArchiveTestingUtil.java

Example 11: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath)
        throws IOException {
    String filePath = targetFilePath.toUri().getPath();
    System.out.println("[getSchema] HiveExportFile path : " + filePath);
    // give it a try.
    if (!filePath.contains("000000_0")) return null;
    // if (!filePath.equalsIgnoreCase("/project/T405/out/000000_0")) return null;

    InputStream inputStream = fs.open(targetFilePath);
    BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
    String str;
    int columnNum = 0;
    while((str = bufferedReader.readLine()) != null) {
        columnNum = str.split(delemiter).length;
        System.out.println(String.format("the first column string is: %s", str));
        break;
    }
    // debug.
    System.out.println("the number of column is: " + columnNum);

    inputStream.close();
    bufferedReader.close();
    // if the file has at most one column, the format does not match.
    if (columnNum == 1) return null;

    String codec = "plain.codec";
    String schemaString = "{\"fields\": [{\"name\": \"name\", \"type\": \"string\"}, {\"name\": \"age\", \"type\": \"int\"}], \"name\": \"Result\", \"namespace\": \"com.tencent.thomas\", \"type\": \"record\"}";
    String storage = STORAGE_TYPE;
    String abstractPath = targetFilePath.toUri().getPath();

    System.out.println("current file is: " + filePath);
    FileStatus fstat = fs.getFileStatus(targetFilePath);
    DatasetJsonRecord datasetJsonRecord =
            new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                    fstat.getPermission().toString(), codec, storage, "");
    return datasetJsonRecord;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 39, Source: HiveExportFileAnalyzer.java

Example 12: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path path) throws IOException {
    DatasetJsonRecord record = null;
    if (!fs.exists(path))
        LOG.error("sequencefileanalyzer file : " + path.toUri().getPath() + " is not exist on hdfs");
    else {
        try {
            LOG.info("sequencefileanalyzer start parse schema for  file path : {}", path.toUri().getPath());
            SequenceFile.Reader reader = new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(path));
            String keyName = "Key";
            String keyType = getWritableType(reader.getKeyClassName());
            String valueName = "Value";
            String valueType = getWritableType(reader.getValueClassName());
            FileStatus status = fs.getFileStatus(path);
            String storage = STORAGE_TYPE;
            String abstractPath = path.toUri().getPath();
            String codec = "sequence.codec";
            String schemaString = "{\"fields\": [{\"name\": \"" + keyName + "\", \"type\": \"" + keyType + "\"}, {\"name\": \"" + valueName + "\", \"type\": \"" + valueType + "\"}], \"name\": \"Result\", \"namespace\": \"com.tencent.lake\", \"type\": \"record\"}";

            record = new DatasetJsonRecord(schemaString, abstractPath, status.getModificationTime(), status.getOwner(), status.getGroup(),
                    status.getPermission().toString(), codec, storage, "");
            LOG.info("sequencefileanalyzer parse path :{},schema is {}", path.toUri().getPath(), record.toCsvString());

        } catch (Exception e) {
            // log the full exception; e.getStackTrace().toString() would only print an array reference
            LOG.error("path : {} content is not Sequence File format content", path.toUri().getPath(), e);
        }

    }
    return record;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 32, Source: SequenceFileAnalyzer.java

Example 13: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath) throws IOException {
    StringBuilder JsonObjectList = new StringBuilder();
    DatasetJsonRecord datasetJsonRecord = null;
    try {
        for (String realName : this.json2Array(getJsonObject(targetFilePath), "schema")) {
            if (realName.charAt(0) == '$') {
                JsonObjectList.append("{\"name\": \"" + realName.substring(1, realName.length()) + "\", \"type\": \"int\"},");
            } else {
                JsonObjectList.append("{\"name\": \"" + realName + "\", \"type\": \"string\"},");
            }
        }
        JsonObjectList.deleteCharAt(JsonObjectList.length() - 1);
        String schemaString = "{\"fields\":[" + JsonObjectList + "],\"name\": \"Result\", \"namespace\": \"com.tencent.thomas\", \"type\": \"record\"}";
        String codec = "json.codec";
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        FileStatus fstat = fs.getFileLinkStatus(targetFilePath);

        datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
    } catch (Exception e) {
        // log the full exception; e.getStackTrace().toString() would only print an array reference
        LOG.error("path : {} content is not JSON File format content", targetFilePath.toUri().getPath(), e);
    }

    return datasetJsonRecord;
}
 
Developer ID: thomas-young-2013, Project: wherehowsX, Lines: 30, Source: JSONFileAnalyzer.java

Example 14: transform

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static FileStatus transform(FileStatus input, String bucket) {
  String relativePath = removeLeadingSlash(Path.getPathWithoutSchemeAndAuthority(input.getPath()).toString());
  Path bucketPath  = new Path(Path.SEPARATOR + bucket);
  Path fullPath = Strings.isEmpty(relativePath) ? bucketPath : new Path(bucketPath, relativePath);
  return new FileStatus(input.getLen(),
          input.isDirectory(),
          input.getReplication(),
          input.getBlockSize(),
          input.getModificationTime(),
          input.getAccessTime(),
          input.getPermission(),
          input.getOwner(),
          input.getGroup(),
          fullPath);
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 16, Source: S3FileSystem.java

Example 15: IndexDirectory

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public IndexDirectory(FileStatus fileStatus) {
    this.path = fileStatus.getPath().toString().replaceFirst("hdfs:\\/\\/.+:\\d{4,6}",""); // don't ask
    this.type = IndexEntry.TYPE_DIRECTORY;
    this.time = fileStatus.getModificationTime();
    this.rights = Short.toString(fileStatus.getPermission().toShort());
    this.user = fileStatus.getOwner();
    this.group = fileStatus.getGroup();
    this.name = fileStatus.getPath().getName();
}
 
Developer ID: trenner, Project: ahar, Lines: 10, Source: IndexDirectory.java


Note: The org.apache.hadoop.fs.FileStatus.getModificationTime method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not reproduce without permission.