

Java OperationContext Class Code Examples

This article collects typical usage examples of the Java class com.microsoft.azure.storage.OperationContext. If you are wondering what the OperationContext class is for, how to use it, or where to find usage examples, the curated code samples below should help.


The OperationContext class belongs to the com.microsoft.azure.storage package. Fifteen code examples of the OperationContext class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
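Before the examples, here is a minimal sketch of the pattern that recurs throughout this page: create an OperationContext, attach event listeners, and pass the context into storage calls so the listeners observe every request. This is an illustrative composite, not taken from any single project below; the listener types (StorageEvent, SendingRequestEvent, ResponseReceivedEvent) are from the azure-storage Java SDK used by these examples.

import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.ResponseReceivedEvent;
import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageEvent;

public class OperationContextSketch {
  public static OperationContext newLoggingContext() {
    OperationContext ctx = new OperationContext();
    // Fires for every HTTP request the SDK sends, including retries.
    ctx.getSendingRequestEventHandler().addListener(
        new StorageEvent<SendingRequestEvent>() {
          @Override
          public void eventOccurred(SendingRequestEvent e) {
            System.out.println("sending request");
          }
        });
    // Fires for every HTTP response received.
    ctx.getResponseReceivedEventHandler().addListener(
        new StorageEvent<ResponseReceivedEvent>() {
          @Override
          public void eventOccurred(ResponseReceivedEvent e) {
            System.out.println("status: " + e.getRequestResult().getStatusCode());
          }
        });
    return ctx;
  }
}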

Example 1: waitForCopyToComplete

import com.microsoft.azure.storage.OperationContext; // import the required package/class
private void waitForCopyToComplete(CloudBlobWrapper blob, OperationContext opContext) {
  boolean copyInProgress = true;
  while (copyInProgress) {
    try {
      blob.downloadAttributes(opContext);
    } catch (StorageException se) {
      // Ignore and retry; the copy-state check below decides whether to keep polling.
    }

    // Test for null because the mocked filesystem doesn't know about copy states yet.
    copyInProgress = (blob.getCopyState() != null
        && blob.getCopyState().getStatus() == CopyStatus.PENDING);
    if (copyInProgress) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // ignore
      }
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: AzureNativeFileSystemStore.java

Example 2: testWrite

import com.microsoft.azure.storage.OperationContext; // import the required package/class
@Test
public void testWrite() throws Exception {
    final Host host = new Host(new AzureProtocol(), "kahy9boj3eib.blob.core.windows.net", new Credentials(
            System.getProperties().getProperty("azure.account"), System.getProperties().getProperty("azure.key")
    ));
    final AzureSession session = new AzureSession(host);
    session.open(new DisabledHostKeyCallback(), new DisabledLoginCallback());
    session.login(new DisabledPasswordStore(), new DisabledLoginCallback(), new DisabledCancelCallback());
    final TransferStatus status = new TransferStatus();
    final byte[] content = RandomUtils.nextBytes(1048576);
    status.setLength(content.length);
    final Path home = new Path("cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
    final CryptoVault cryptomator = new CryptoVault(
            new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new DisabledPasswordStore());
    final Path vault = cryptomator.create(session, null, new VaultCredentials("test"));
    session.withRegistry(new DefaultVaultRegistry(new DisabledPasswordStore(), new DisabledPasswordCallback(), cryptomator));
    final CryptoWriteFeature<Void> writer = new CryptoWriteFeature<>(session, new AzureWriteFeature(session, null), cryptomator);
    final Cryptor cryptor = cryptomator.getCryptor();
    final FileHeader header = cryptor.fileHeaderCryptor().create();
    status.setHeader(cryptor.fileHeaderCryptor().encryptHeader(header));
    status.setNonces(new RotatingNonceGenerator(cryptomator.numberOfChunks(content.length)));
    final Path test = new Path(vault, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    status.setChecksum(writer.checksum(test).compute(new ByteArrayInputStream(content), status));
    final OutputStream out = writer.write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    out.close();
    final OperationContext context = new OperationContext();
    assertTrue(new CryptoFindFeature(session, new AzureFindFeature(session, context), cryptomator).find(test));
    assertEquals(content.length, new CryptoListService(session, session, cryptomator).list(test.getParent(), new DisabledListProgressListener()).get(test).attributes().getSize());
    assertEquals(content.length, new CryptoWriteFeature<>(session, new AzureWriteFeature(session, context, new DefaultFindFeature(session), new DefaultAttributesFinderFeature(session)), cryptomator).append(test, status.getLength(), PathCache.empty()).size, 0L);
    assertEquals(content.length, new CryptoWriteFeature<>(session, new AzureWriteFeature(session, context, new AzureFindFeature(session, context), new AzureAttributesFinderFeature(session, context)), cryptomator).append(test, status.getLength(), PathCache.empty()).size, 0L);
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length);
    final InputStream in = new CryptoReadFeature(session, new AzureReadFeature(session, context), cryptomator).read(test, new TransferStatus().length(content.length), new DisabledConnectionCallback());
    new StreamCopier(status, status).transfer(in, buffer);
    assertArrayEquals(content, buffer.toByteArray());
    new CryptoDeleteFeature(session, new AzureDeleteFeature(session, context), cryptomator).delete(Arrays.asList(test, vault), new DisabledLoginCallback(), new Delete.DisabledCallback());
    session.close();
}
 
Developer ID: iterate-ch, Project: cyberduck, Lines of code: 40, Source file: AzureWriteFeatureTest.java

Example 3: hook

import com.microsoft.azure.storage.OperationContext; // import the required package/class
public static void hook(OperationContext operationContext, float readFactor,
    float writeFactor) {

  SelfThrottlingIntercept throttler = new SelfThrottlingIntercept(
      operationContext, readFactor, writeFactor);
  ResponseReceivedListener responseListener = throttler.new ResponseReceivedListener();
  SendingRequestListener sendingListener = throttler.new SendingRequestListener();

  operationContext.getResponseReceivedEventHandler().addListener(
      responseListener);
  operationContext.getSendingRequestEventHandler().addListener(
      sendingListener);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source file: SelfThrottlingIntercept.java
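A hedged usage sketch for the hook above: create a context, install the throttling listeners, then pass the context into subsequent blob operations, as Example 11 does. The factor values are illustrative; Hadoop derives them from the fs.azure.selfthrottling.* configuration settings.

OperationContext ctx = new OperationContext();
// Illustrative factors; smaller values throttle more aggressively.
SelfThrottlingIntercept.hook(ctx, 1.0f, 1.0f);
// Hand ctx to subsequent blob calls so the listeners see each request/response.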

Example 4: SendRequestIntercept

import com.microsoft.azure.storage.OperationContext; // import the required package/class
/**
 * Constructor for SendRequestIntercept.
 *
 * @param storageCreds
 *          - storage account credentials for signing packets.
 * @param allowConcurrentOOBIo
 *          - whether to allow reads concurrent with out-of-band writes.
 * @param opContext
 *          - the operation context to bind the intercept to.
 */
private SendRequestIntercept(StorageCredentials storageCreds,
    boolean allowConcurrentOOBIo, OperationContext opContext) {
  // Capture the send delay callback interface.
  this.storageCreds = storageCreds;
  this.allowConcurrentOOBIo = allowConcurrentOOBIo;
  this.opContext = opContext;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source file: SendRequestIntercept.java

Example 5: hook

import com.microsoft.azure.storage.OperationContext; // import the required package/class
/**
 * Hooks a new listener to the given operationContext that will update the
 * error metrics for the WASB file system appropriately in response to
 * ResponseReceived events.
 *
 * @param operationContext The operationContext to hook.
 * @param instrumentation The metrics source to update.
 */
public static void hook(
    OperationContext operationContext,
    AzureFileSystemInstrumentation instrumentation) {
  ErrorMetricUpdater listener =
      new ErrorMetricUpdater(operationContext,
          instrumentation);
  operationContext.getResponseReceivedEventHandler().addListener(listener);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: ErrorMetricUpdater.java

Example 6: hook

import com.microsoft.azure.storage.OperationContext; // import the required package/class
/**
 * Hooks a new listener to the given operationContext that will update the
 * metrics for the WASB file system appropriately in response to
 * ResponseReceived events.
 *
 * @param operationContext The operationContext to hook.
 * @param instrumentation The metrics source to update.
 * @param blockUploadGaugeUpdater The blockUploadGaugeUpdater to use.
 */
public static void hook(
    OperationContext operationContext,
    AzureFileSystemInstrumentation instrumentation,
    BandwidthGaugeUpdater blockUploadGaugeUpdater) {
  ResponseReceivedMetricUpdater listener =
      new ResponseReceivedMetricUpdater(operationContext,
          instrumentation, blockUploadGaugeUpdater);
  operationContext.getResponseReceivedEventHandler().addListener(listener);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source file: ResponseReceivedMetricUpdater.java

Example 7: listBlobs

import com.microsoft.azure.storage.OperationContext; // import the required package/class
@Override
public Iterable<ListBlobItem> listBlobs(String prefix,
    boolean useFlatBlobListing, EnumSet<BlobListingDetails> listingDetails,
    BlobRequestOptions options, OperationContext opContext)
    throws URISyntaxException, StorageException {
  return WrappingIterator.wrap(directory.listBlobs(prefix,
      useFlatBlobListing, listingDetails, options, opContext));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source file: StorageInterfaceImpl.java

Example 8: startCopyFromBlob

import com.microsoft.azure.storage.OperationContext; // import the required package/class
@Override
public void startCopyFromBlob(URI source, BlobRequestOptions options,
    OperationContext opContext)
        throws StorageException, URISyntaxException {
  getBlob().startCopyFromBlob(source,
      null, null, options, opContext);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: StorageInterfaceImpl.java
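Examples 1 and 8 combine naturally: startCopyFromBlob only initiates a server-side copy, so callers poll the copy state until it leaves PENDING. A hedged sketch against the CloudBlobWrapper interface used throughout these examples (error handling trimmed for brevity; not code from the original project):

void copyAndWait(CloudBlobWrapper target, URI source,
    BlobRequestOptions options, OperationContext opContext)
    throws StorageException, URISyntaxException {
  target.startCopyFromBlob(source, options, opContext);
  boolean pending = true;
  while (pending) {
    target.downloadAttributes(opContext); // refreshes getCopyState()
    pending = target.getCopyState() != null
        && target.getCopyState().getStatus() == CopyStatus.PENDING;
    if (pending) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}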

Example 9: downloadRange

import com.microsoft.azure.storage.OperationContext; // import the required package/class
@Override
public void downloadRange(long offset, long length, OutputStream outStream,
    BlobRequestOptions options, OperationContext opContext)
        throws StorageException, IOException {

  getBlob().downloadRange(offset, length, outStream, null, options, opContext);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: StorageInterfaceImpl.java

Example 10: downloadPageRanges

import com.microsoft.azure.storage.OperationContext; // import the required package/class
public ArrayList<PageRange> downloadPageRanges(BlobRequestOptions options,
    OperationContext opContext) throws StorageException {
  return ((CloudPageBlob) getBlob()).downloadPageRanges(
      null, options, opContext);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source file: StorageInterfaceImpl.java

Example 11: getInstrumentedContext

import com.microsoft.azure.storage.OperationContext; // import the required package/class
/**
 * Creates a new OperationContext for the Azure Storage operation that has
 * listeners hooked to it that will update the metrics for this file system.
 * 
 * @param bindConcurrentOOBIo
 *          - bind to intercept send request call backs to handle OOB I/O.
 * 
 * @return The OperationContext object to use.
 */
private OperationContext getInstrumentedContext(boolean bindConcurrentOOBIo) {

  OperationContext operationContext = new OperationContext();

  if (selfThrottlingEnabled) {
    SelfThrottlingIntercept.hook(operationContext, selfThrottlingReadFactor,
        selfThrottlingWriteFactor);
  }

  if (bandwidthGaugeUpdater != null) {
    // bandwidthGaugeUpdater is null when we are configured to skip Azure metrics.
    ResponseReceivedMetricUpdater.hook(
       operationContext,
       instrumentation,
       bandwidthGaugeUpdater);
  }

  // Bind operation context to receive send request callbacks on this operation.
  // If reads concurrent to OOB writes are allowed, the interception will reset
  // the conditional header on all Azure blob storage read requests.
  if (bindConcurrentOOBIo) {
    SendRequestIntercept.bind(storageInteractionLayer.getCredentials(),
        operationContext, true);
  }

  if (testHookOperationContext != null) {
    operationContext =
        testHookOperationContext.modifyOperationContext(operationContext);
  }

  ErrorMetricUpdater.hook(operationContext, instrumentation);

  // Return the operation context.
  return operationContext;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 45, Source file: AzureNativeFileSystemStore.java

Example 12: safeDelete

import com.microsoft.azure.storage.OperationContext; // import the required package/class
/**
 * Deletes the given blob, taking special care that if we get a
 * blob-not-found exception upon retrying the operation, we just
 * swallow the error since what most probably happened is that
 * the first operation succeeded on the server.
 * @param blob The blob to delete.
 * @param lease Azure blob lease, or null if no lease is to be used.
 * @throws StorageException
 */
private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease) throws StorageException {
  OperationContext operationContext = getInstrumentedContext();
  try {
    blob.delete(operationContext, lease);
  } catch (StorageException e) {
    // On exception, check that if:
    // 1. It's a BlobNotFound exception AND
    // 2. It got there after one-or-more retries THEN
    // we swallow the exception.
    if (e.getErrorCode() != null &&
        e.getErrorCode().equals("BlobNotFound") &&
        operationContext.getRequestResults().size() > 1 &&
        operationContext.getRequestResults().get(0).getException() != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Swallowing delete exception on retry: " + e.getMessage());
      }
      return;
    } else {
      throw e;
    }
  } finally {
    if (lease != null) {
      lease.free();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 36, Source file: AzureNativeFileSystemStore.java
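The retry detection above relies on OperationContext.getRequestResults(), which accumulates one RequestResult per physical request attempt. The same check in isolation, as a minimal sketch:

// True when the operation was retried and the first attempt failed,
// i.e. the pattern safeDelete() treats as "the server already did it".
static boolean retriedAfterFailedFirstAttempt(OperationContext ctx) {
  return ctx.getRequestResults().size() > 1
      && ctx.getRequestResults().get(0).getException() != null;
}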

Example 13: getPageBlobSize

import com.microsoft.azure.storage.OperationContext; // import the required package/class
/**
 * Helper method to extract the actual data size of a page blob.
 * This typically involves 2 service requests (one for page ranges, another
 * for the last page's data).
 *
 * @param blob The blob to get the size from.
 * @param opContext The operation context to use for the requests.
 * @return The total data size of the blob in bytes.
 * @throws IOException If the format is corrupt.
 * @throws StorageException If anything goes wrong in the requests.
 */
public static long getPageBlobSize(CloudPageBlobWrapper blob,
    OperationContext opContext) throws IOException, StorageException {
  // Get the page ranges for the blob. There should be one range starting
  // at byte 0, but we tolerate (and ignore) ranges after the first one.
  ArrayList<PageRange> pageRanges =
      blob.downloadPageRanges(new BlobRequestOptions(), opContext);
  if (pageRanges.size() == 0) {
    return 0;
  }
  if (pageRanges.get(0).getStartOffset() != 0) {
    // Not expected: we always upload our page blobs as a contiguous range
    // starting at byte 0.
    throw badStartRangeException(blob, pageRanges.get(0));
  }
  long totalRawBlobSize = pageRanges.get(0).getEndOffset() + 1;

  // Get the last page.
  long lastPageStart = totalRawBlobSize - PAGE_SIZE;
  ByteArrayOutputStream baos = 
      new ByteArrayOutputStream(PageBlobFormatHelpers.PAGE_SIZE);
  blob.downloadRange(lastPageStart, PAGE_SIZE, baos,
      new BlobRequestOptions(), opContext);

  byte[] lastPage = baos.toByteArray();
  short lastPageSize = getPageSize(blob, lastPage, 0);
  long totalNumberOfPages = totalRawBlobSize / PAGE_SIZE;
  return (totalNumberOfPages - 1) * PAGE_DATA_SIZE + lastPageSize;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 40, Source file: PageBlobInputStream.java
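A worked example of the final formula, under the assumption (from Hadoop's WASB page-blob layout) that PAGE_SIZE is 512 bytes and each page spends 2 bytes on a length header, leaving PAGE_DATA_SIZE = 510:

// Hypothetical numbers for illustration only.
long totalRawBlobSize = 2048;                       // page range covers 4 raw pages
long totalNumberOfPages = totalRawBlobSize / 512;   // = 4
short lastPageSize = 100;                           // data bytes in the last page
long dataSize = (totalNumberOfPages - 1) * 510 + lastPageSize; // 3 * 510 + 100 = 1630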

Example 14: PageBlobInputStream

import com.microsoft.azure.storage.OperationContext; // import the required package/class
/**
 * Constructs a stream over the given page blob.
 */
public PageBlobInputStream(CloudPageBlobWrapper blob,
    OperationContext opContext)
    throws IOException {
  this.blob = blob;
  this.opContext = opContext;
  ArrayList<PageRange> allRanges;
  try {
    allRanges =
        blob.downloadPageRanges(new BlobRequestOptions(), opContext);
  } catch (StorageException e) {
    throw new IOException(e);
  }
  if (allRanges.size() > 0) {
    if (allRanges.get(0).getStartOffset() != 0) {
      throw badStartRangeException(blob, allRanges.get(0));
    }
    if (allRanges.size() > 1) {
      LOG.warn(String.format(
          "Blob %s has %d page ranges beyond the first range. " 
          + "Only reading the first range.",
          blob.getUri(), allRanges.size() - 1));
    }
    numberOfPagesRemaining =
        (allRanges.get(0).getEndOffset() + 1) / PAGE_SIZE;
  } else {
    numberOfPagesRemaining = 0;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source file: PageBlobInputStream.java

Example 15: testCheckBlockMd5

import com.microsoft.azure.storage.OperationContext; // import the required package/class
private void testCheckBlockMd5(final boolean expectMd5Checked)
    throws Exception {
  assumeNotNull(testAccount);
  Path testFilePath = new Path("/testFile");

  // Add a hook to check that for GET/PUT requests we set/don't set
  // the block-level MD5 field as configured. I tried to do clever
  // testing by also messing with the raw data to see if we actually
  // validate the data as expected, but the HttpURLConnection wasn't
  // pluggable enough for me to do that.
  testAccount.getFileSystem().getStore()
      .addTestHookToOperationContext(new TestHookOperationContext() {
        @Override
        public OperationContext modifyOperationContext(
            OperationContext original) {
          original.getResponseReceivedEventHandler().addListener(
              new ContentMD5Checker(expectMd5Checked));
          return original;
        }
      });

  OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
  outStream.write(new byte[] { 5, 15 });
  outStream.close();

  InputStream inStream = testAccount.getFileSystem().open(testFilePath);
  byte[] inBuf = new byte[100];
  while (inStream.read(inBuf) > 0) {
    // nothing; just drain the stream so the listener sees the GET responses
  }
  inStream.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source file: TestBlobDataValidation.java
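ContentMD5Checker itself is a test helper not shown on this page. A hedged sketch of what such a listener could look like, assuming the event's getConnectionObject() exposes the underlying java.net.HttpURLConnection; the header name and assertion style here are illustrative, not the actual Hadoop test code:

// Hypothetical stand-in for ContentMD5Checker, for illustration only.
class ContentMd5Listener extends StorageEvent<ResponseReceivedEvent> {
  private final boolean expectMd5;

  ContentMd5Listener(boolean expectMd5) {
    this.expectMd5 = expectMd5;
  }

  @Override
  public void eventOccurred(ResponseReceivedEvent e) {
    Object conn = e.getConnectionObject();
    if (conn instanceof HttpURLConnection) {
      // For PUT requests, block-level MD5 travels in the Content-MD5 request header.
      String md5 = ((HttpURLConnection) conn).getRequestProperty("Content-MD5");
      if (expectMd5 && md5 == null) {
        throw new AssertionError("expected Content-MD5 header on request");
      }
    }
  }
}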


Note: The com.microsoft.azure.storage.OperationContext class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.