

Java ByteArrayOutputStream.write Method Code Examples

This article collects typical usage examples of the Java method org.apache.commons.io.output.ByteArrayOutputStream.write. If you are wondering what ByteArrayOutputStream.write does, how to call it, or what real-world uses look like, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.commons.io.output.ByteArrayOutputStream.


The following presents 10 code examples of the ByteArrayOutputStream.write method, sorted by popularity by default.
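Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: reading a stream chunk by chunk, appending each chunk with write(byte[], int, int), and collecting the result with toByteArray(). The class name WriteQuickStart is illustrative only and does not come from any of the projects below.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.io.output.ByteArrayOutputStream;

public class WriteQuickStart {
    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3, 4, 5});

        // Read into a fixed buffer and append each chunk to the output stream
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buf = new byte[1024];
        int n;
        while ((n = in.read(buf)) != -1) {
            out.write(buf, 0, n);
        }

        // Collect everything written so far as a byte array
        byte[] result = out.toByteArray();
        System.out.println("copied " + result.length + " bytes");
    }
}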

Example 1: convertBufferToBytes

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
/**
 * Converts a BufferedInputStream to a byte array.
 *
 * @param inputStream  the stream to read from
 * @param bufferLength the size of the read buffer
 * @return the full contents of the stream, or null if inputStream is null
 * @throws IOException if closing the output stream fails
 */
private byte[] convertBufferToBytes(BufferedInputStream inputStream, int bufferLength) throws IOException {
    if (inputStream == null)
        return null;
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    byte[] buffer = new byte[bufferLength];
    // Log and append the first chunk; every chunk read must be written to the output
    int x = inputStream.read(buffer, 0, bufferLength);
    Log.i("GraphServiceController", "bytes read from picture input stream " + String.valueOf(x));
    if (x > 0) {
        outputStream.write(buffer, 0, x);
    }

    int n;
    try {
        while ((n = inputStream.read(buffer, 0, bufferLength)) >= 0) {
            outputStream.write(buffer, 0, n);
        }
        inputStream.close();
    } catch (IOException e) {
        e.printStackTrace();
    }

    outputStream.close();
    return outputStream.toByteArray();
}
 
Developer: microsoftgraph, Project: android-java-connect-sample, Lines: 30, Source: GraphServiceController.java

Example 2: wrap

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
public static WarpScriptStackFunction wrap(String name, InputStream in, boolean secure) {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  
  byte[] buf = new byte[1024];

  try {
    while(true) {
      int len = in.read(buf);
      
      if (len < 0) {
        break;
      }
      
      baos.write(buf, 0, len);
    }      
    
    in.close();
    
    String mc2 = new String(baos.toByteArray(), Charsets.UTF_8);
    
    return wrap(name, mc2, secure);
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
 
Developer: cityzendata, Project: warp10-platform, Lines: 26, Source: MacroHelper.java

Example 3: downloadSettleTB

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
/**
 * Downloads the settlement tabulation file.
 * @param batch settlement batch number
 * @return the file wrapped in a FileTransfer, or null if it does not exist
 * @throws Exception
 */
public FileTransfer downloadSettleTB(String batch) throws Exception {
	String fileName = batch + "settleTB.xls";
	File settlementFile = new File(Ryt.getParameter("SettlementFilePath") + fileName);
	if (settlementFile.exists()) {
		BufferedInputStream bis = null;
		try {
			// Open the file through a BufferedInputStream that the finally block closes
			bis = new BufferedInputStream(new FileInputStream(settlementFile));
			ByteArrayOutputStream buffer = new ByteArrayOutputStream();
			// Commons IO ByteArrayOutputStream drains the whole InputStream in one call
			buffer.write(bis);
			return new FileTransfer(fileName, "application/x-xls", buffer.toByteArray());
		} catch (IOException e) {
			throw e;
		} finally {
			if (bis != null)
				bis.close();
		}
	} else {
		return null;
	}
}
 
Developer: wufeisoft, Project: ryf_mms2, Lines: 27, Source: SettlementService.java

Example 4: getResponseBytes

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
@Nullable
protected final byte[] getResponseBytes(HttpResponse response) throws IOException, HttpException {
    try (InputStream stream = HttpUtils.getContent(response.getEntity())) {
        ByteArrayOutputStream output = new ByteArrayOutputStream();
        int n;
        byte[] buffer = new byte[BUFFER_SIZE];
        while (IOUtils.EOF != (n = stream.read(buffer)) && !isInterrupted()) {
            output.write(buffer, 0, n);
        }
        return isInterrupted() ? null : output.toByteArray();
    }
}
 
Developer: kalikov, Project: lighthouse, Lines: 13, Source: AbstractHttpLoader.java

Example 5: serialize

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
/**
 * Serialize as appropriate to send a record to Kafka that contains information
 * pertaining to the schema version that applies to this record.
 *
 * @param record a GenericRecord
 * @return a byte array representing the encoded Generic record + schema ID
 * @throws IOException if there is a problem with the encoding
 */
public byte[] serialize(GenericRecord record) throws IOException {
      
    BinaryEncoder encoder = null;
    Schema schema = record.getSchema();
    
    byte[] rval;

    // register the schema. 
    // TODO: determine if we need getName() or getFullName()
    int schemaId = registerSchema(schema.getName(), schema);
    
    // serialize the record into a byte array
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    out.write(magic_byte);
    out.write(ByteBuffer.allocate(idSize).putInt(schemaId).array());
    //DatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(schema);
    DatumWriter<Object> writer = new GenericDatumWriter<Object>(schema);

    encoder = org.apache.avro.io.EncoderFactory.get().directBinaryEncoder(out, encoder);
    writer.write(record, encoder);
    encoder.flush();
    rval = out.toByteArray();
    //out.close(); // noop in the Apache version, so not bothering
    
    return rval;
}
 
Developer: oracle, Project: bdglue, Lines: 35, Source: KafkaRegistry.java
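The serialize method above frames each Kafka message as one magic byte, a 4-byte big-endian schema ID, and then the Avro-encoded record. The following is a minimal consumer-side sketch of that framing, written against the same layout; FramedRecord, MAGIC_BYTE, and ID_SIZE are hypothetical names introduced here for illustration and are not part of the bdglue project.

import java.nio.ByteBuffer;

public class FramedRecord {
    private static final byte MAGIC_BYTE = 0x0; // assumed to match magic_byte in serialize()
    private static final int ID_SIZE = 4;       // assumed to match idSize in serialize()

    private final int schemaId;
    private final byte[] payload;

    public FramedRecord(int schemaId, byte[] payload) {
        this.schemaId = schemaId;
        this.payload = payload;
    }

    /** Splits a framed message into the schema ID and the Avro payload. */
    public static FramedRecord parse(byte[] message) {
        if (message.length < 1 + ID_SIZE) {
            throw new IllegalArgumentException("Message too short to contain the header");
        }
        ByteBuffer buffer = ByteBuffer.wrap(message);
        if (buffer.get() != MAGIC_BYTE) {
            throw new IllegalArgumentException("Unexpected magic byte");
        }
        int schemaId = buffer.getInt();            // 4-byte schema ID, big-endian by default
        byte[] payload = new byte[buffer.remaining()];
        buffer.get(payload);                       // remaining bytes are the Avro-encoded record
        return new FramedRecord(schemaId, payload);
    }

    public int getSchemaId() { return schemaId; }
    public byte[] getPayload() { return payload; }
}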

Example 6: computeHash

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
private static long computeHash(long k0, long k1, long timestamp, List<String> classSelectors, List<Map<String,String>> labelsSelectors) {
  //
  // Create a ByteArrayOutputStream into which the content will be dumped
  //
  
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  
  // Add timestamp
  
  try {
    baos.write(Longs.toByteArray(timestamp));
    
    if (null != classSelectors) {
      for (String classSelector: classSelectors) {
        baos.write(classSelector.getBytes(Charsets.UTF_8));
      }
    }
    
    if (null != labelsSelectors) {
      for (Map<String, String> map: labelsSelectors) {
        TreeMap<String,String> tm = new TreeMap<String, String>();
        tm.putAll(map);
        for (Entry<String,String> entry: tm.entrySet()) {
          baos.write(entry.getKey().getBytes(Charsets.UTF_8));
          baos.write(entry.getValue().getBytes(Charsets.UTF_8));
        }
      }
    }      
  } catch (IOException ioe) {
    return 0L;
  }
  
  // Compute hash
  
  byte[] data = baos.toByteArray();
  
  long hash = SipHashInline.hash24(k0, k1, data, 0, data.length);
  
  return hash;    
}
 
Developer: cityzendata, Project: warp10-platform, Lines: 41, Source: DirectoryUtil.java

Example 7: downloadTXTFile

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
public FileTransfer downloadTXTFile(String contents, String filename)
		throws Exception {
	if (contents == null || contents.length() == 0) {
		contents = "";
	}
	ByteArrayOutputStream buffer = new ByteArrayOutputStream();
	buffer.write(contents.getBytes());
	return new FileTransfer(filename, "application/text", buffer.toByteArray());
}
 
Developer: wufeisoft, Project: ryf_mms2, Lines: 11, Source: DownloadFile.java

Example 8: testCompressedBuffer

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
@Test
public void testCompressedBuffer() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Capture the InputStream into a ByteArrayOutputStream before the Transport thread closes the
   * InputStream and makes it unavailable for reading.
   */
  ByteArrayOutputStream captured = new ByteArrayOutputStream();
  Answer answer = new Answer() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      UploadPartRequest req = invocation.getArgumentAt(0, UploadPartRequest.class);
      captured.write(req.getInputStream());
      return new UploadPartResult();
    }
  };

  Mockito.doAnswer(answer).when(mockClient).uploadPart(any(UploadPartRequest.class));

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);

  buffer.close();
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("bucket", argument.getValue().getBucketName());
  assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
  assertEquals(1, argument.getValue().getPartNumber());
  assertEquals(40, argument.getValue().getPartSize());
  assertEquals("123", argument.getValue().getUploadId());

  /*
   * Convert the actual InputStream from the client into a ByteArrayOutputStream which can be read
   * and verified.
   */
  byte[] actualBytes = captured.toByteArray();
  byte[] expectedBytes =
      {66, 90, 104, 57, 49, 65, 89, 38, 83, 89, 118, -10, -77, -27, 0, 0, 0, -63, 0, 0, 16, 1, 0,
          -96, 0, 48, -52, 12, -62, 12, 46, -28, -118, 112, -95, 32, -19, -19, 103, -54};

  assertArrayEquals(expectedBytes, actualBytes);
}
 
Developer: Nextdoor, Project: bender, Lines: 70, Source: S3TransporterTest.java

Example 9: testCompressed

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
@Test
public void testCompressed() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Capture the InputStream into a ByteArrayOutputStream before the Transport thread closes the
   * InputStream and makes it unavailable for reading.
   */
  ByteArrayOutputStream captured = new ByteArrayOutputStream();
  Answer answer = new Answer() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      UploadPartRequest req = invocation.getArgumentAt(0, UploadPartRequest.class);
      captured.write(req.getInputStream());
      return new UploadPartResult();
    }
  };

  Mockito.doAnswer(answer).when(mockClient).uploadPart(any(UploadPartRequest.class));

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);

  buffer.close();
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("bucket", argument.getValue().getBucketName());
  assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
  assertEquals(1, argument.getValue().getPartNumber());
  assertEquals(40, argument.getValue().getPartSize());
  assertEquals("123", argument.getValue().getUploadId());

  /*
   * Convert the actual InputStream from the client into a ByteArrayOutputStream which can be read
   * and verified.
   */
  byte[] actualBytes = captured.toByteArray();
  byte[] expectedBytes =
      {66, 90, 104, 57, 49, 65, 89, 38, 83, 89, 118, -10, -77, -27, 0, 0, 0, -63, 0, 0, 16, 1, 0,
          -96, 0, 48, -52, 12, -62, 12, 46, -28, -118, 112, -95, 32, -19, -19, 103, -54};

  assertArrayEquals(expectedBytes, actualBytes);
}
 
Developer: Nextdoor, Project: bender, Lines: 70, Source: S3TransporterTest.java

Example 10: fromBlock

import org.apache.commons.io.output.ByteArrayOutputStream; // import the package/class this method depends on
public static GTSDecoder fromBlock(byte[] block, byte[] key) throws IOException {
  
  if (block.length < 6) {
    throw new IOException("Invalid block.");
  }
  
  ByteBuffer buffer = ByteBuffer.wrap(block);

  //
  // Extract size
  //
  
  buffer.order(ByteOrder.BIG_ENDIAN);
  int size = buffer.getInt();
      
  // Check size
  
  if (block.length != size) {
    throw new IOException("Invalid block size, expected " + size + ", block is " + block.length);
  }
  
  // Extract compression
  
  byte comp = buffer.get();
  
  boolean compress = false;
  
  if (0 == comp) {
    compress = false;
  } else if (1 == comp) {
    compress = true;
  } else {
    throw new IOException("Invalid compression flag");
  }
  
  // Extract base timestamp
  
  long base = Varint.decodeSignedLong(buffer);

  InputStream in;
  
  ByteArrayInputStream bain = new ByteArrayInputStream(block, buffer.position(), buffer.remaining());
  
  if (compress) {
    in = new GZIPInputStream(bain);
  } else {
    in = bain;
  }
  
  byte[] buf = new byte[1024];
  
  ByteArrayOutputStream out = new ByteArrayOutputStream(buffer.remaining());
  
  while(true) {
    int len = in.read(buf);
    
    if (len <= 0) {
      break;
    }
    out.write(buf, 0, len);
  }
  
  GTSDecoder decoder = new GTSDecoder(base, key, ByteBuffer.wrap(out.toByteArray()));
  
  return decoder;
}
 
Developer: cityzendata, Project: warp10-platform, Lines: 67, Source: GTSDecoder.java
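As a closing note, several of the examples above hand-roll the read/write loop. When the goal is simply to drain an InputStream into a byte array, the Commons IO ByteArrayOutputStream can do it with a single write(InputStream) call, as Example 3 does, or IOUtils can bypass the intermediate stream entirely. A minimal sketch follows; the class name CopyStreamExample is illustrative only.

import java.io.ByteArrayInputStream;
import java.io.IOException;

import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.ByteArrayOutputStream;

public class CopyStreamExample {
    public static void main(String[] args) throws IOException {
        byte[] data = "hello".getBytes("UTF-8");

        // Drain the whole InputStream with a single write(InputStream) call
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write(new ByteArrayInputStream(data));
        byte[] viaWrite = out.toByteArray();

        // Or skip the intermediate output stream entirely
        byte[] viaIOUtils = IOUtils.toByteArray(new ByteArrayInputStream(data));

        System.out.println(java.util.Arrays.equals(viaWrite, viaIOUtils)); // true
    }
}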


Note: The org.apache.commons.io.output.ByteArrayOutputStream.write examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce this article without permission.