This article collects typical code examples of the Java method com.mongodb.gridfs.GridFSInputFile.put. If you are wondering what GridFSInputFile.put does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of its containing class, com.mongodb.gridfs.GridFSInputFile.
Below are 15 code examples of the GridFSInputFile.put method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
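Before the individual examples, here is a minimal end-to-end sketch of the pattern they all share: create a GridFSInputFile, attach custom keys with put, save it, and read the keys back from the resulting GridFSDBFile. It assumes a local mongod and the legacy com.mongodb driver; the database name "demo" and bucket name "attachments" are placeholders, not taken from any of the examples below.

import com.mongodb.DB;
import com.mongodb.MongoClient;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;

public class GridFSPutSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection: local mongod, database "demo", bucket "attachments".
        MongoClient client = new MongoClient("localhost", 27017);
        DB db = client.getDB("demo");
        GridFS gfs = new GridFS(db, "attachments");

        // put(key, value) adds an arbitrary key/value pair to the file document
        // before it is persisted by save().
        GridFSInputFile in = gfs.createFile("hello gridfs".getBytes());
        in.setFilename("hello.txt");
        in.put("source", "example");
        in.put("version", 1);
        in.save();

        // The same keys can be read back from the stored GridFSDBFile via get().
        GridFSDBFile out = gfs.findOne("hello.txt");
        System.out.println(out.get("source") + " / " + out.get("version"));

        client.close();
    }
}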
Example 1: uploadData
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
public DataAddress uploadData(String data, DataAddress dataAddress) throws UnknownHostException {
ServerAddress server = new ServerAddress(dataAddress.hostname, dataAddress.port);
GridFS database = connectToDatabase(server);
logger.info("Database connected");
GridFSInputFile file = database.createFile(data.getBytes());
int newID = getNextId(database);
logger.info("Got new id for uploaded file: " + newID);
file.setFilename(String.valueOf(newID));
file.put("_id", newID);
file.save();
logger.info("after save");
return new DataAddress(dataAddress.hostname, dataAddress.port, newID);
}
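The helpers connectToDatabase and getNextId are not part of this snippet. A common way to implement a numeric id sequence such as getNextId is an atomic counter document kept next to the GridFS bucket; the sketch below is an assumption for illustration only, and the collection name "counters" and field "seq" are not from the original code.

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.gridfs.GridFS;

// Hypothetical getNextId: atomically increments a counter document and returns the new value.
private int getNextId(GridFS database) {
    DBCollection counters = database.getDB().getCollection("counters");
    DBObject next = counters.findAndModify(
            new BasicDBObject("_id", "fileId"),                       // which counter
            null, null, false,                                        // fields, sort, remove
            new BasicDBObject("$inc", new BasicDBObject("seq", 1)),   // increment
            true, true);                                              // return new value, upsert
    return ((Number) next.get("seq")).intValue();
}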
Example 2: insertAnnexDocument
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
* Inserts publication annex document.
*/
public void insertAnnexDocument(BinaryFile bf, String dateString) throws ParseException {
try {
GridFS gfs = new GridFS(db, MongoCollectionsInterface.PUB_ANNEXES);
BasicDBObject whereQuery = new BasicDBObject();
whereQuery.put("repositoryDocId", bf.getRepositoryDocId());
whereQuery.put("filename", bf.getFileName());
gfs.remove(whereQuery);
//version ?
GridFSInputFile gfsFile = gfs.createFile(bf.getStream(), true);
gfsFile.put("uploadDate", Utilities.parseStringDate(dateString));
gfsFile.setFilename(bf.getFileName());
gfsFile.put("source", bf.getSource());
gfsFile.put("version", bf.getRepositoryDocVersion());
gfsFile.put("repositoryDocId", bf.getRepositoryDocId());
gfsFile.put("anhalyticsId", bf.getAnhalyticsId());
gfsFile.save();
} catch (ParseException e) {
logger.error(e.getMessage(), e.getCause());
}
}
Example 3: insertGrobidTei
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
* Inserts grobid tei using GridFS.
*/
public void insertGrobidTei(String teiString, String repositoryDocId, String anhalyticsId, String version, String source, String type, String date) {
try {
GridFS gfs = new GridFS(db, MongoCollectionsInterface.GROBID_TEIS);
gfs.remove(repositoryDocId + ".tei.xml");
GridFSInputFile gfsFile = gfs.createFile(new ByteArrayInputStream(teiString.getBytes()), true);
gfsFile.put("uploadDate", Utilities.parseStringDate(date));
gfsFile.setFilename(repositoryDocId + ".tei.xml");
gfsFile.put("repositoryDocId", repositoryDocId);
gfsFile.put("anhalyticsId", anhalyticsId);
gfsFile.put("source", source);
gfsFile.put("version", version);
gfsFile.put("documentType", type);
gfsFile.save();
} catch (ParseException e) {
logger.error(e.getMessage(), e.getCause());
}
}
Example 4: insertMetadataTei
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
* Inserts TEI metadata document in the GridFS.
*/
public void insertMetadataTei(String tei, String doi, String pdfUrl, String source, String repositoryDocId, String version, String type, String date) {
try {
GridFS gfs = new GridFS(db, MongoCollectionsInterface.METADATAS_TEIS);
gfs.remove(repositoryDocId + ".tei.xml");
GridFSInputFile gfsFile = gfs.createFile(new ByteArrayInputStream(tei.getBytes()), true);
gfsFile.put("uploadDate", Utilities.parseStringDate(date));
gfsFile.setFilename(repositoryDocId + ".tei.xml");
gfsFile.put("repositoryDocId", repositoryDocId);
gfsFile.put("anhalyticsId", generateAnhalyticsId(repositoryDocId, doi, pdfUrl));
gfsFile.put("source", source);
gfsFile.put("version", version);
gfsFile.put("documentType", type);
gfsFile.save();
} catch (ParseException e) {
logger.error(e.getMessage(), e.getCause());
}
}
Example 5: insertBinaryDocument
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
* Inserts PDF binary document in the GridFS.
*/
public void insertBinaryDocument(BinaryFile bf, String date) {
try {
GridFS gfs = new GridFS(db, MongoCollectionsInterface.BINARIES);
gfs.remove(bf.getFileName());
GridFSInputFile gfsFile = gfs.createFile(bf.getStream(), true);
gfsFile.put("uploadDate", Utilities.parseStringDate(date));
gfsFile.setFilename(bf.getFileName());
gfsFile.put("repositoryDocId", bf.getRepositoryDocId());
gfsFile.put("anhalyticsId", bf.getAnhalyticsId());
gfsFile.put("source", bf.getSource());
gfsFile.put("version", bf.getRepositoryDocVersion());
gfsFile.put("documentType", bf.getDocumentType());
gfsFile.setContentType(bf.getFileType());
gfsFile.save();
} catch (ParseException e) {
logger.error(e.getMessage(), e.getCause());
}
}
Example 6: updateTei
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
* Updates an already existing TEI with a newer, more enriched one (e.g. with fulltext).
*/
public void updateTei(String newTei, String repositoryDocId, String collection) {
try {
GridFS gfs = new GridFS(db, collection);
GridFSDBFile gdf = gfs.findOne(repositoryDocId + ".tei.xml");
GridFSInputFile gfsNew = gfs.createFile(new ByteArrayInputStream(newTei.getBytes()), true);
gfsNew.put("uploadDate", gdf.getUploadDate());
gfsNew.setFilename(gdf.get("repositoryDocId") + ".tei.xml");
gfsNew.put("repositoryDocId", gdf.get("repositoryDocId"));
gfsNew.put("documentType", gdf.get("documentType"));
gfsNew.put("anhalyticsId", gdf.get("anhalyticsId"));
gfsNew.put("source", gdf.get("source"));
gfsNew.save();
gfs.remove(gdf);
} catch (Exception e) {
logger.error(e.getMessage(), e.getCause());
}
}
Example 7: insertExternalTeiDocument
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
* Inserts an arXiv/Istex TEI document in the GridFS.
*/
public void insertExternalTeiDocument(InputStream file, String identifier, String repository, String namespace, String dateString) {
try {
GridFS gfs = new GridFS(db, namespace);
GridFSInputFile gfsFile = gfs.createFile(file, true);
gfs.remove(identifier + ".pdf");
gfsFile.put("uploadDate", Utilities.parseStringDate(dateString));
gfsFile.setFilename(identifier + ".tei.xml");
gfsFile.put("identifier", identifier);
gfsFile.put("repository", repository);
gfsFile.setContentType("application/tei+xml");
gfsFile.save();
} catch (ParseException e) {
logger.error(e.getMessage(), e.getCause());
}
}
Example 8: concatGridFile
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
* Saves a file into Mongo via GridFS.
* @param file the file object to store
* @param id custom _id value (e.g. from a user-defined sequence)
* @param metaData metadata as key/value pairs
* @return true if the file was stored, false if it already exists or the save failed
*/
public boolean concatGridFile(File file, Object id, DBObject metaData){
GridFSInputFile gridFSInputFile;
DBObject query = new BasicDBObject("_id", id);
GridFSDBFile gridFSDBFile = myFS.findOne(query);
if(gridFSDBFile!= null)
return false;
try {
gridFSInputFile = myFS.createFile(file);
gridFSInputFile.put("_id",id);
gridFSInputFile.setFilename(file.getName());
gridFSInputFile.setMetaData(metaData);
gridFSInputFile.setContentType(file.getName().substring(file.getName().lastIndexOf(".")));
gridFSInputFile.save();
} catch (Exception e) {
e.printStackTrace();
return false;
}
return true;
}
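A short usage sketch for the method above: store a local PDF under a custom _id, then read it back through the same GridFS instance (myFS). The file path, id, and metadata values are placeholders.

import com.mongodb.BasicDBObject;
import com.mongodb.DBObject;
import com.mongodb.gridfs.GridFSDBFile;
import java.io.File;

// Placeholder values throughout; myFS is the GridFS instance used by concatGridFile.
DBObject meta = new BasicDBObject("owner", "alice").append("tag", "report");
boolean stored = concatGridFile(new File("/tmp/report.pdf"), 42, meta);
if (stored) {
    GridFSDBFile saved = myFS.findOne(new BasicDBObject("_id", 42));
    System.out.println(saved.getFilename() + " / " + saved.getMetaData());
}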
Example 9: run
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
@Override
public void run() {
try{
File localPath = new File(localRoot, file.getFilename());
log.info("Save to local file:" + localPath.getAbsolutePath());
File dirName = localPath.getParentFile();
if(!dirName.exists()){
dirName.mkdirs();
}
file.writeTo(localPath);
GridFSInputFile newFile = fs.createFile(new byte[]{0, 0,});
newFile.setMetaData(file.getMetaData());
newFile.setFilename(file.getFilename());
newFile.put("localLength", file.getLength());
newFile.save(10);
//log.info("remove:%s" + file.getId() + ", fn:" + file.getFilename());
fs.remove((ObjectId)file.getId());
}catch(Throwable e){
log.error("Failed to dump file to local fs, error:" + e.toString(), e);
}
}
Example 10: saveBlob
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
@Override
public void saveBlob(final MD5 md5, final InputStream data,
final boolean sorted)
throws BlobStoreCommunicationException {
if(data == null || md5 == null) {
throw new NullPointerException("Arguments cannot be null");
}
if (getFile(md5) != null) {
return; //already exists
}
final GridFSInputFile gif = gfs.createFile(data, true);
gif.setId(md5.getMD5());
gif.setFilename(md5.getMD5());
gif.put(Fields.GFS_SORTED, sorted);
try {
gif.save();
} catch (DuplicateKeyException dk) {
// already here, done
} catch (MongoException me) {
throw new BlobStoreCommunicationException(
"Could not write to the mongo database", me);
}
}
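The getFile(md5) check at the top is not shown in the snippet. Since saveBlob sets both _id and filename to the MD5 string, a plausible implementation (an assumption, not the project's actual code) is a lookup by _id:

import com.mongodb.BasicDBObject;
import com.mongodb.gridfs.GridFSDBFile;

// Hypothetical getFile: returns the stored blob whose _id is the MD5 string, or null if absent.
private GridFSDBFile getFile(final MD5 md5) {
    return gfs.findOne(new BasicDBObject("_id", md5.getMD5()));
}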
Example 11: prefetchData
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
public DataAddress prefetchData(DataAddress givenAddress, ServerAddress destAddress) throws IOException {
logger.info("yo2");
ServerAddress givenServer = new ServerAddress(givenAddress.hostname, givenAddress.port);
GridFS givenDatabase = connectToDatabase(givenServer);
logger.info("yo");
GridFSDBFile givenPackage = givenDatabase.findOne(new BasicDBObject("_id", givenAddress.ID));
ByteArrayOutputStream baos = new ByteArrayOutputStream((int)givenPackage.getLength());
givenPackage.writeTo(baos);
logger.info("Prefetched");
GridFS destDatabase = connectToDatabase(destAddress);
GridFSInputFile destPackage = destDatabase.createFile(baos.toByteArray());
int newID = getNextId(destDatabase);
logger.info("Got new id for prefetched package: " + newID);
destPackage.put("_id", newID);
destPackage.save();
logger.info("after save");
DataAddress ret = new DataAddress();
ret.hostname = destAddress.getHost();
ret.port = destAddress.getPort();
ret.ID = newID;
return ret;
}
Example 12: saveFile
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
*
* @param inputStream the file input stream
* @param format file format such as "pdf" or "png", without the leading "."
* @param uid optional user id stored with the file (skipped if null)
* @return the generated filename combined with the format
*/
public String saveFile(InputStream inputStream,String format,String uid) {
try {
GridFS gridFS = getInstance();
//generate a random file name, retrying on collision
String filename = this.randomFileName();
//if a file with this name already exists, generate a new filename
while (true) {
GridFSDBFile _current = gridFS.findOne(filename);
//no file with this name exists, proceed with the save
if (_current == null) {
break;
}
filename = this.randomFileName();
}
GridFSInputFile file = gridFS.createFile(inputStream, filename);
if(format != null) {
file.put("format", format);
}
if(uid != null) {
file.put("uid",uid);
}
file.put("content-type","application/octet-stream");
file.save();
return concat(filename,format);
}catch (Exception e) {
throw new RuntimeException(e);
} finally {
try{
inputStream.close();
}catch (Exception ex) {
//ignore errors while closing the stream
}
}
}
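The helpers randomFileName() and concat() are referenced above but not included in the snippet; the sketches below are assumptions about what they might look like, provided only to make the example readable on its own.

import java.util.UUID;

// Hypothetical randomFileName: a random, collision-unlikely name such as a dashless UUID.
private String randomFileName() {
    return UUID.randomUUID().toString().replace("-", "");
}

// Hypothetical concat: appends the format as a file extension, e.g. ("abc", "pdf") -> "abc.pdf".
private String concat(String filename, String format) {
    return format == null ? filename : filename + "." + format;
}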
Example 13: build
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
public GridFSInputFile build(IGridFSSession gridFS) throws Exception {
GridFSInputFile _inFile = null;
switch (__type) {
case 1: // is File
_inFile = gridFS.getGridFS().createFile((File) __targetObject);
break;
case 2: // is InputStream
_inFile = gridFS.getGridFS().createFile((InputStream) __targetObject);
break;
case 3: // is Array
_inFile = gridFS.getGridFS().createFile((byte[]) __targetObject);
}
if (_inFile != null) {
_inFile.setFilename(__filename);
_inFile.setContentType(__contentType);
if (__chunkSize > 0) {
_inFile.setChunkSize(__chunkSize);
}
if (!__attributes.isEmpty()) {
for (Map.Entry<String, Object> _entry : __attributes.entrySet()) {
_inFile.put(_entry.getKey(), _entry.getValue());
}
}
}
return _inFile;
}
Example 14: insertTei
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
/**
* Inserts generated tei using GridFS.
*/
public void insertTei(TEIFile tei, String date, String collection) {
try {
GridFS gfs = new GridFS(db, collection);
gfs.remove(tei.getFileName());
GridFSInputFile gfsFile = gfs.createFile(new ByteArrayInputStream(tei.getTei().getBytes()), true);
gfsFile.put("uploadDate", Utilities.parseStringDate(date));
gfsFile.setFilename(tei.getFileName());
gfsFile.put("repositoryDocId", tei.getRepositoryDocId());
if (collection.equals(MongoCollectionsInterface.METADATAS_TEIS)) {
String anhalyticsID = generateAnhalyticsId(tei.getRepositoryDocId(), tei.getDoi(), (tei.getPdfdocument() != null) ? tei.getPdfdocument().getUrl() : null);
gfsFile.put("anhalyticsId", anhalyticsID);
if (tei.getPdfdocument() != null) {
tei.getPdfdocument().setAnhalyticsId(anhalyticsID);
}
for (BinaryFile annex : tei.getAnnexes()) {
annex.setAnhalyticsId(anhalyticsID);
}
} else {
gfsFile.put("anhalyticsId", tei.getAnhalyticsId());
}
gfsFile.put("source", tei.getSource());
gfsFile.put("version", tei.getRepositoryDocVersion());
gfsFile.put("documentType", tei.getDocumentType());
gfsFile.setContentType(tei.getFileType());
gfsFile.save();
} catch (ParseException e) {
logger.error(e.getMessage(), e.getCause());
}
}
Example 15: makeBloomFilter
import com.mongodb.gridfs.GridFSInputFile; // import the package/class this method depends on
private void makeBloomFilter(ArrayList<String> chunk, int distributionID) {
final String firstResource = chunk.get(0);
final String lastResource = chunk.get(chunk.size() - 1);
int chunkSize = chunk.size();
if (chunkSize < 5000)
chunkSize = 5000;
BloomFilterI filter = BloomFilterFactory.newBloomFilter();
filter.create(chunkSize, fpp);
for (String resource : chunk) {
filter.add(resource);
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
try {
filter.writeTo(out);
} catch (IOException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
GridFS gfs = new GridFS(DBSuperClass2.getDBInstance(), COLLECTION_NAME);
GridFSInputFile gfsFile;
try {
gfsFile = gfs.createFile(new BufferedInputStream(new ByteArrayInputStream(out.toByteArray())));
gfsFile.put(FIRST_RESOURCE, firstResource);
gfsFile.put(LAST_RESOURCE, lastResource);
gfsFile.put(DISTRIBUTION_ID, distributionID);
gfsFile.save();
} catch (Exception e) {
System.out.println(firstResource);
System.out.println(lastResource);
System.out.println(distributionID);
// TODO Auto-generated catch block
e.printStackTrace();
}
}
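A hedged sketch of the read side: the custom keys written with put above (FIRST_RESOURCE, LAST_RESOURCE, DISTRIBUTION_ID) can be queried back to locate a distribution's chunk filters and skip ranges that cannot contain a given resource. How the filter bytes are deserialized depends on BloomFilterI, so that part is left as a comment.

import com.mongodb.BasicDBObject;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
import java.util.List;

// Fetch every stored chunk filter for one distribution and inspect its resource range.
GridFS gfs = new GridFS(DBSuperClass2.getDBInstance(), COLLECTION_NAME);
List<GridFSDBFile> chunks = gfs.find(new BasicDBObject(DISTRIBUTION_ID, distributionID));
for (GridFSDBFile chunkFile : chunks) {
    String first = (String) chunkFile.get(FIRST_RESOURCE);
    String last = (String) chunkFile.get(LAST_RESOURCE);
    // Only chunks whose [first, last] range can contain the resource need their
    // bloom filter loaded (via chunkFile.getInputStream()) and tested.
}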