

Java FileStatus.getModificationTime Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileStatus.getModificationTime. If you are wondering what FileStatus.getModificationTime does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileStatus.


The following presents 15 code examples of FileStatus.getModificationTime, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
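Before diving into the examples, here is a minimal standalone sketch (not taken from any of the projects below; the class name ModTimeExample and the path /user/demo/example.txt are placeholders) showing the typical call sequence. getModificationTime() returns the last modification time in milliseconds since the epoch, directly comparable to System.currentTimeMillis(); stores that do not track modification times (e.g. S3) may report 0, a case Example 9 below explicitly handles.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModTimeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/user/demo/example.txt");
    // Resolve the FileSystem that owns this path (HDFS, local, S3, ...)
    FileSystem fs = path.getFileSystem(conf);
    FileStatus status = fs.getFileStatus(path);
    // Milliseconds since the epoch; may be 0 on stores without mtime support
    long modTime = status.getModificationTime();
    long ageMillis = System.currentTimeMillis() - modTime;
    System.out.println(path + " was last modified " + ageMillis + " ms ago");
  }
}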

Example 1: isLogDeletable

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
@Override
public boolean isLogDeletable(FileStatus fStat) {
  long currentTime = EnvironmentEdgeManager.currentTime();
  long time = fStat.getModificationTime();
  long life = currentTime - time;
  
  if (LOG.isTraceEnabled()) {
    LOG.trace("Log life:" + life + ", ttl:" + ttl + ", current:" + currentTime + ", from: "
        + time);
  }
  if (life < 0) {
    LOG.warn("Found a log (" + fStat.getPath() + ") newer than current time (" + currentTime
        + " < " + time + "), probably a clock skew");
    return false;
  }
  return life > ttl;
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TimeToLiveLogCleaner.java

Example 2: scanIfNeeded

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
public synchronized void scanIfNeeded(FileStatus fs) {
  long newModTime = fs.getModificationTime();
  if (modTime != newModTime) {
    Path p = fs.getPath();
    try {
      scanIntermediateDirectory(p);
      //If scanning fails, we will scan again.  We assume the failure is
      // temporary.
      modTime = newModTime;
    } catch (IOException e) {
      LOG.error("Error while trying to scan the directory " + p, e);
    }
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Scan not needed of " + fs.getPath());
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: HistoryFileManager.java

Example 3: apply

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
@Override
public PathMetadata apply(@Nonnull Path location) {
  try {
    FileSystem fs = location.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(location);
    FileChecksum checksum = null;
    if (fileStatus.isFile()) {
      checksum = fs.getFileChecksum(location);
    }

    List<PathMetadata> childPathDescriptors = new ArrayList<>();
    if (fileStatus.isDirectory()) {
      FileStatus[] childStatuses = fs.listStatus(location);
      for (FileStatus childStatus : childStatuses) {
        childPathDescriptors.add(apply(childStatus.getPath()));
      }
    }

    return new PathMetadata(location, fileStatus.getModificationTime(), checksum, childPathDescriptors);

  } catch (IOException e) {
    throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e);
  }
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 25, Source: PathToPathMetadata.java

Example 4: copy

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
private Path copy(Path sCopy, Path dstdir) throws IOException {
  FileSystem sourceFs = sCopy.getFileSystem(conf);
  Path dCopy = new Path(dstdir, "tmp_"+sCopy.getName());
  FileStatus sStat = sourceFs.getFileStatus(sCopy);
  if (sStat.getModificationTime() != resource.getTimestamp()) {
    throw new IOException("Resource " + sCopy +
        " changed on src filesystem (expected " + resource.getTimestamp() +
        ", was " + sStat.getModificationTime() + ")");
  }
  if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
    if (!isPublic(sourceFs, sCopy, sStat, statCache)) {
      throw new IOException("Resource " + sCopy +
          " is not publicly accessible and as such cannot be part of the" +
          " public cache.");
    }
  }

  FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
      true, conf);
  return dCopy;
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: FSDownload.java

Example 5: isResourceEvictable

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
@Override
public boolean isResourceEvictable(String key, FileStatus file) {
  synchronized (initialAppsLock) {
    if (initialApps.size() > 0) {
      return false;
    }
  }

  long staleTime =
      System.currentTimeMillis()
          - TimeUnit.MINUTES.toMillis(this.stalenessMinutes);
  long accessTime = getAccessTime(key);
  if (accessTime == -1) {
    // check modification time
    long modTime = file.getModificationTime();
    // if the modification time is older than the store startup time, use the
    // store startup time as the last point of certainty
    long lastUse = modTime < this.startTime ? this.startTime : modTime;
    return lastUse < staleTime;
  } else {
    // check access time
    return accessTime < staleTime;
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: InMemorySCMStore.java

Example 6: testSetTimes

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
private void testSetTimes() throws Exception {
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    long at = status1.getAccessTime();
    long mt = status1.getModificationTime();

    fs = getHttpFSFileSystem();
    fs.setTimes(path, mt - 10, at - 20);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    status1 = fs.getFileStatus(path);
    fs.close();
    long atNew = status1.getAccessTime();
    long mtNew = status1.getModificationTime();
    Assert.assertEquals(mt - 10, mtNew);
    Assert.assertEquals(at - 20, atNew);
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: BaseTestHttpFSWith.java

Example 7: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
@Override
public DatasetJsonRecord getSchema(Path path) throws IOException {
    DatasetJsonRecord record = null;
    if (!fs.exists(path))
        LOG.error("file path : {} not in hdfs", path);
    else {
        try {
            RCFile.Reader reader = new RCFile.Reader(fs, path, fs.getConf());
            Map<Text, Text> meta = reader.getMetadata().getMetadata();
            reader.close();
            // rcfile column number
            int columnNumber = Integer.parseInt(meta.get(new Text(COLUMN_NUMBER_KEY)).toString());
            FileStatus status = fs.getFileStatus(path);
            String schemaString = getRCFileSchema(columnNumber);
            String storage = STORAGE_TYPE;
            String abstractPath = path.toUri().getPath();
            String codec = "rc.codec";
            record = new DatasetJsonRecord(schemaString, abstractPath, status.getModificationTime(), status.getOwner(), status.getGroup(),
                    status.getPermission().toString(), codec, storage, "");
            LOG.info("rc file : {} schema is {}", path.toUri().getPath(), schemaString);
        } catch (Exception e) {
            LOG.error("path : {} content is not RC File format content", path.toUri().getPath(), e);
        }
    }

    return record;
}
 
Developer: thomas-young-2013, Project: wherehowsX, Lines: 28, Source: RCFileAnalyzer.java

Example 8: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath)
        throws IOException {
    LOG.info("avro file path : " + targetFilePath.toUri().getPath());
    try {
        SeekableInput sin = new FsInput(targetFilePath, fs.getConf());
        DataFileReader<GenericRecord> reader =
                new DataFileReader<GenericRecord>(sin, new GenericDatumReader<GenericRecord>());
        String codec = reader.getMetaString("avro.codec");
        long record_count = reader.getBlockCount();

        String schemaString = reader.getSchema().toString();
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        System.out.println("the schema string is: " + schemaString);
        System.out.println("the abstract path is: " + abstractPath);
      
        FileStatus fstat = fs.getFileStatus(targetFilePath);
        DatasetJsonRecord datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
        reader.close();
        sin.close();
        LOG.info("Avro file datasetjsonrecorc get success, it is : " + datasetJsonRecord);
        return datasetJsonRecord;
    } catch (Exception e) {
        LOG.error("AvroAnalyzer failed to build DatasetJsonRecord for " + targetFilePath, e);
        return null;
    }

}
 
Developer: thomas-young-2013, Project: wherehowsX, Lines: 32, Source: AvroFileAnalyzer.java

Example 9: checkMultifileStatus

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
/**
 * Given a file update key, determine whether the source system has changed since we last read the status.
 * @param fileUpdateKey
 * @return The type of status change.
 */
private UpdateStatus checkMultifileStatus(FileUpdateKey fileUpdateKey) {
  final List<FileSystemCachedEntity> cachedEntities = fileUpdateKey.getCachedEntitiesList();
  for (int i = 0; i < cachedEntities.size(); ++i) {
    final FileSystemCachedEntity cachedEntity = cachedEntities.get(i);
    final Path cachedEntityPath =  new Path(cachedEntity.getPath());
    try {

      final Optional<FileStatus> optionalStatus = fs.getFileStatusSafe(cachedEntityPath);
      if(!optionalStatus.isPresent()) {
        // if first entity (root) is missing then table is deleted
        if (i == 0) {
          return UpdateStatus.DELETED;
        }
        // missing directory force update for this dataset
        return UpdateStatus.CHANGED;
      }

      if(cachedEntity.getLastModificationTime() == 0) {
        // this system doesn't support modification times, no need to further probe (S3)
        return UpdateStatus.CHANGED;
      }

      final FileStatus updatedFileStatus = optionalStatus.get();
      final long updatedModificationTime = updatedFileStatus.getModificationTime();
      Preconditions.checkArgument(updatedFileStatus.isDirectory(), "fs based dataset update key must be composed of directories");
      if (cachedEntity.getLastModificationTime() < updatedModificationTime) {
        // the file/folder has been changed since our last check.
        return UpdateStatus.CHANGED;
      }

    } catch (IOException ioe) {
      // continue with other cached entities
      logger.error("Failed to get status for {}", cachedEntityPath, ioe);
      return UpdateStatus.CHANGED;
    }
  }

  return UpdateStatus.UNCHANGED;
}
 
Developer: dremio, Project: dremio-oss, Lines: 45, Source: FileSystemStoragePlugin2.java

Example 10: getFileLists

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
/**
 * @return <expected, gotten, backup>, where each is sorted
 */
private static List<List<String>> getFileLists(FileStatus[] previous, FileStatus[] archived) {
  List<List<String>> files = new ArrayList<List<String>>();

  // copy over the original files
  List<String> originalFileNames = convertToString(previous);
  files.add(originalFileNames);

  List<String> currentFiles = new ArrayList<String>(previous.length);
  List<FileStatus> backedupFiles = new ArrayList<FileStatus>(previous.length);
  for (FileStatus f : archived) {
    String name = f.getPath().getName();
    // if the file has been backed up
    if (name.contains(".")) {
      Path parent = f.getPath().getParent();
      String shortName = name.split("[.]")[0];
      Path modPath = new Path(parent, shortName);
      FileStatus file = new FileStatus(f.getLen(), f.isDirectory(), f.getReplication(),
          f.getBlockSize(), f.getModificationTime(), modPath);
      backedupFiles.add(file);
    } else {
      // otherwise, add it to the list to compare to the original store files
      currentFiles.add(name);
    }
  }

  files.add(currentFiles);
  files.add(convertToString(backedupFiles));
  return files;
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: HFileArchiveTestingUtil.java

Example 11: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath)
        throws IOException {
    String filePath = targetFilePath.toUri().getPath();
    System.out.println("[getSchema] HiveExportFile path : " + filePath);
    // give it a try.
    if (!filePath.contains("000000_0")) return null;
    // if (!filePath.equalsIgnoreCase("/project/T405/out/000000_0")) return null;

    InputStream inputStream = fs.open(targetFilePath);
    BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
    String str;
    int columnNum = 0;
    while((str = bufferedReader.readLine()) != null) {
        columnNum = str.split(delemiter).length;
        System.out.println(String.format("the first column string is: %s", str));
        break;
    }
    // debug.
    System.out.println("the number of column is: " + columnNum);

    inputStream.close();
    bufferedReader.close();
    // if splitting produced only one column, the delimiter did not match the file format
    if (columnNum == 1) return null;

    String codec = "plain.codec";
    String schemaString = "{\"fields\": [{\"name\": \"name\", \"type\": \"string\"}, {\"name\": \"age\", \"type\": \"int\"}], \"name\": \"Result\", \"namespace\": \"com.tencent.thomas\", \"type\": \"record\"}";
    String storage = STORAGE_TYPE;
    String abstractPath = targetFilePath.toUri().getPath();

    System.out.println("current file is: " + filePath);
    FileStatus fstat = fs.getFileStatus(targetFilePath);
    DatasetJsonRecord datasetJsonRecord =
            new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                    fstat.getPermission().toString(), codec, storage, "");
    return datasetJsonRecord;
}
 
Developer: thomas-young-2013, Project: wherehowsX, Lines: 39, Source: HiveExportFileAnalyzer.java

Example 12: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
@Override
public DatasetJsonRecord getSchema(Path path) throws IOException {
    DatasetJsonRecord record = null;
    if (!fs.exists(path))
        LOG.error("sequencefileanalyzer file : " + path.toUri().getPath() + " is not exist on hdfs");
    else {
        try {
            LOG.info("sequencefileanalyzer start parse schema for  file path : {}", path.toUri().getPath());
            SequenceFile.Reader reader = new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(path));
            String keyName = "Key";
            String keyType = getWritableType(reader.getKeyClassName());
            String valueName = "Value";
            String valueType = getWritableType(reader.getValueClassName());
            reader.close();
            FileStatus status = fs.getFileStatus(path);
            String storage = STORAGE_TYPE;
            String abstractPath = path.toUri().getPath();
            String codec = "sequence.codec";
            String schemaString = "{\"fields\": [{\"name\": \"" + keyName + "\", \"type\": \"" + keyType + "\"}, {\"name\": \"" + valueName + "\", \"type\": \"" + valueType + "\"}], \"name\": \"Result\", \"namespace\": \"com.tencent.lake\", \"type\": \"record\"}";

            record = new DatasetJsonRecord(schemaString, abstractPath, status.getModificationTime(), status.getOwner(), status.getGroup(),
                    status.getPermission().toString(), codec, storage, "");
            LOG.info("sequencefileanalyzer parse path :{},schema is {}", path.toUri().getPath(), record.toCsvString());

        } catch (Exception e) {
            LOG.error("path : {} content is not Sequence File format content", path.toUri().getPath(), e);
        }

    }
    return record;
}
 
Developer: thomas-young-2013, Project: wherehowsX, Lines: 32, Source: SequenceFileAnalyzer.java

Example 13: getSchema

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
@Override
public DatasetJsonRecord getSchema(Path targetFilePath) throws IOException {
    StringBuilder jsonObjectList = new StringBuilder();
    DatasetJsonRecord datasetJsonRecord = null;
    try {
        // fields whose names start with '$' are typed as int, the rest as string
        for (String realName : this.json2Array(getJsonObject(targetFilePath), "schema")) {
            if (realName.charAt(0) == '$') {
                jsonObjectList.append("{\"name\": \"" + realName.substring(1) + "\", \"type\": \"int\"},");
            } else {
                jsonObjectList.append("{\"name\": \"" + realName + "\", \"type\": \"string\"},");
            }
        }
        // drop the trailing comma
        jsonObjectList.deleteCharAt(jsonObjectList.length() - 1);
        String schemaString = "{\"fields\":[" + jsonObjectList + "],\"name\": \"Result\", \"namespace\": \"com.tencent.thomas\", \"type\": \"record\"}";
        String codec = "json.codec";
        String storage = STORAGE_TYPE;
        String abstractPath = targetFilePath.toUri().getPath();
        FileStatus fstat = fs.getFileLinkStatus(targetFilePath);

        datasetJsonRecord =
                new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(),
                        fstat.getPermission().toString(), codec, storage, "");
    } catch (Exception e) {
        LOG.error("path : {} content is not JSON File format content", targetFilePath.toUri().getPath(), e);
    }

    return datasetJsonRecord;
}
 
Developer: thomas-young-2013, Project: wherehowsX, Lines: 30, Source: JSONFileAnalyzer.java

Example 14: transform

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
private static FileStatus transform(FileStatus input, String bucket) {
  String relativePath = removeLeadingSlash(Path.getPathWithoutSchemeAndAuthority(input.getPath()).toString());
  Path bucketPath  = new Path(Path.SEPARATOR + bucket);
  Path fullPath = Strings.isEmpty(relativePath) ? bucketPath : new Path(bucketPath, relativePath);
  return new FileStatus(input.getLen(),
          input.isDirectory(),
          input.getReplication(),
          input.getBlockSize(),
          input.getModificationTime(),
          input.getAccessTime(),
          input.getPermission(),
          input.getOwner(),
          input.getGroup(),
          fullPath);
}
 
Developer: dremio, Project: dremio-oss, Lines: 16, Source: S3FileSystem.java

Example 15: IndexDirectory

import org.apache.hadoop.fs.FileStatus; // import the package/class this method depends on
public IndexDirectory(FileStatus fileStatus) {
    this.path = fileStatus.getPath().toString().replaceFirst("hdfs:\\/\\/.+:\\d{4,6}", ""); // strip the "hdfs://host:port" prefix, keeping only the path
    this.type = IndexEntry.TYPE_DIRECTORY;
    this.time = fileStatus.getModificationTime();
    this.rights = Short.toString(fileStatus.getPermission().toShort());
    this.user = fileStatus.getOwner();
    this.group = fileStatus.getGroup();
    this.name = fileStatus.getPath().getName();
}
 
Developer: trenner, Project: ahar, Lines: 10, Source: IndexDirectory.java


Note: The org.apache.hadoop.fs.FileStatus.getModificationTime examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in each fragment remains with its original authors. Please consult the corresponding project's License before distributing or reusing the code, and do not republish without permission.