

Java MutableBoolean.setValue Method Code Examples

This article collects typical usage examples of the Java method org.apache.commons.lang.mutable.MutableBoolean.setValue. If you are wondering what MutableBoolean.setValue does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.commons.lang.mutable.MutableBoolean.


The following section presents 15 code examples of MutableBoolean.setValue, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
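The project examples below are fairly involved, so here is a minimal, self-contained sketch of the typical pattern behind most of them: a caller creates a MutableBoolean, passes it into a method as an out-parameter, and the method signals a condition back by calling setValue. This sketch assumes commons-lang 2.x on the classpath; the MutableBooleanDemo class and the stripDigits helper are illustrative names invented here, not part of the examples that follow.

import org.apache.commons.lang.mutable.MutableBoolean;

public class MutableBooleanDemo {

    // Illustrative helper: removes digits from the input and reports through the
    // out-parameter whether any digit was found.
    private static String stripDigits(String text, MutableBoolean foundDigit) {
        StringBuilder sb = new StringBuilder();
        for (char c : text.toCharArray()) {
            if (Character.isDigit(c)) {
                foundDigit.setValue(true); // signal the caller via the mutable holder
            } else {
                sb.append(c);
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        MutableBoolean foundDigit = new MutableBoolean(false);
        String cleaned = stripDigits("abc123", foundDigit);
        System.out.println(cleaned);                   // abc
        System.out.println(foundDigit.booleanValue()); // true
    }
}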

Example 1: applyFilters

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
private void applyFilters(ScanTreeNode unfilteredRoot, ScanTreeNode issuesFilteredRoot, ScanTreeNode licenseFilteredRoot, MutableBoolean severitySelected, MutableBoolean licenseSelected) {
    severitySelected.setValue(isSeveritySelected(unfilteredRoot));
    licenseSelected.setValue(isLicenseSelected(unfilteredRoot));
    for (int i = 0; i < unfilteredRoot.getChildCount(); i++) {
        ScanTreeNode unfilteredChild = (ScanTreeNode) unfilteredRoot.getChildAt(i);
        ScanTreeNode filteredSeverityChild = getFilteredTreeNode(unfilteredChild);
        ScanTreeNode filteredLicenseChild = (ScanTreeNode) unfilteredChild.clone();
        MutableBoolean childSeveritySelected = new MutableBoolean();
        MutableBoolean childLicenseSelected = new MutableBoolean();
        applyFilters(unfilteredChild, filteredSeverityChild, filteredLicenseChild, childSeveritySelected, childLicenseSelected);
        if (childSeveritySelected.booleanValue()) {
            severitySelected.setValue(true);
            issuesFilteredRoot.add(filteredSeverityChild);
        }
        if (childLicenseSelected.booleanValue()) {
            licenseSelected.setValue(true);
            licenseFilteredRoot.add(filteredLicenseChild);
        }
    }
}
 
Developer: JFrogDev, Project: jfrog-idea-plugin, Lines: 21, Source: FilterManager.java

Example 2: handleThrowable

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
public void handleThrowable(Throwable t1, ServerName serverName,
    MutableBoolean couldNotCommunicateWithServer) throws IOException {
  Throwable t2 = translateException(t1);
  boolean isLocalException = !(t2 instanceof RemoteException);
  if (isLocalException && isConnectionException(t2)) {
    couldNotCommunicateWithServer.setValue(true);
    handleFailureToServer(serverName, t2);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: PreemptiveFastFailInterceptor.java

Example 3: addToken

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
private void addToken(TokenGroup group, int start, int end, String charString, MutableBoolean isNewLine,
                      Token lastToken) {
    Token token = new Token(start, end, charString);
    if (isNewLine.booleanValue()) {
        group.addNewLine(start);
        isNewLine.setValue(false);
    }
    token.setPreceedBySpace(start - lastToken.getEnd() > 0);

    int spaces = 0;
    if (lastToken != null && lastToken.getEnd() != 0) {
        int endLast = lastToken.getEnd();
        spaces = lastToken.getSpaceOffset();
        if (start == endLast) {
            spaces++;
        } else {
            spaces -= Math.max(0, start - endLast - 1);
        }
    }
    token.setSpaceOffset(spaces);

    // Normalization
    String n;
    if (charString.length() == 1) {
        int c = charString.charAt(0);
        n = normalizedChars.get(c);
    } else {
        n = normalizedStrings.get(charString);
    }
    if (n != null) {
        token.setNormForm(n);
    }

    lastToken.updateByToken(token);
    group.addToken(token);
}
 
Developer: dhfbk, Project: tint, Lines: 37, Source: ItalianTokenizer.java

Example 4: readBlobFromRDD

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
@Override
protected FrameBlock readBlobFromRDD(RDDObject rdd, MutableBoolean status)
		throws IOException 
{
	//note: the read of a frame block from an RDD might trigger
	//lazy evaluation of pending transformations.
	RDDObject lrdd = rdd;

	//prepare return status (by default only collect)
	status.setValue(false);
	
	MetaDataFormat iimd = (MetaDataFormat) _metaData;
	MatrixCharacteristics mc = iimd.getMatrixCharacteristics();
	int rlen = (int)mc.getRows();
	int clen = (int)mc.getCols();
	
	//handle missing schema if necessary
	ValueType[] lschema = (_schema!=null) ? _schema : 
		UtilFunctions.nCopies(clen>=1 ? (int)clen : 1, ValueType.STRING);
	
	FrameBlock fb = null;
	try  {
		//prevent unnecessary collect through rdd checkpoint
		if( rdd.allowsShortCircuitCollect() ) {
			lrdd = (RDDObject)rdd.getLineageChilds().get(0);
		}
		
		//collect frame block from binary block RDD
		fb = SparkExecutionContext.toFrameBlock(lrdd, lschema, rlen, clen);	
	}
	catch(DMLRuntimeException ex) {
		throw new IOException(ex);
	}
	
	//sanity check correct output
	if( fb == null )
		throw new IOException("Unable to load frame from rdd.");
	
	return fb;
}
 
Developer: apache, Project: systemml, Lines: 41, Source: FrameObject.java

Example 5: checkProhibitedValues

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
private <O extends ObjectType, R extends ObjectType> boolean checkProhibitedValues(String newPassword, ProhibitedValuesType prohibitedValuesType, AbstractValuePolicyOriginResolver<O> originResolver,
		Consumer<ProhibitedValueItemType> failAction, String shortDesc, Task task, OperationResult result) throws SchemaException, ObjectNotFoundException, ExpressionEvaluationException, CommunicationException, ConfigurationException, SecurityViolationException {

	if (prohibitedValuesType == null || originResolver == null) {
		return true;
	}
	
	MutableBoolean isAcceptable = new MutableBoolean(true);
	for (ProhibitedValueItemType prohibitedItemType: prohibitedValuesType.getItem()) {
		
		ItemPathType itemPathType = prohibitedItemType.getPath();
		if (itemPathType == null) {
			throw new SchemaException("No item path defined in prohibited item in "+shortDesc);
		}
		ItemPath itemPath = itemPathType.getItemPath();
		
		ResultHandler<R> handler = (object, objectResult) -> {
			
			PrismProperty<Object> objectProperty = object.findProperty(itemPath);
			if (objectProperty == null) {
				return true;
			}
			
			if (isMatching(newPassword, objectProperty)) {
				if (failAction != null) {
					failAction.accept(prohibitedItemType);
				}
				isAcceptable.setValue(false);
				return false;
			}
			
			return true;
		};
		originResolver.resolve(handler, prohibitedItemType, shortDesc, task, result);			
	}

	return isAcceptable.booleanValue();
}
 
Developer: Evolveum, Project: midpoint, Lines: 39, Source: ValuePolicyProcessor.java

Example 6: toByteArray

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
private static final byte[] toByteArray(final HttpEntity entity,
        int maxContent, MutableBoolean trimmed) throws IOException {

    if (entity == null)
        return new byte[] {};

    final InputStream instream = entity.getContent();
    if (instream == null) {
        return null;
    }
    Args.check(entity.getContentLength() <= Integer.MAX_VALUE,
            "HTTP entity too large to be buffered in memory");
    int reportedLength = (int) entity.getContentLength();
    // set default size for buffer: 100 KB
    int bufferInitSize = 102400;
    if (reportedLength != -1) {
        bufferInitSize = reportedLength;
    }
    // avoid init of too large a buffer when we will trim anyway
    if (maxContent != -1 && bufferInitSize > maxContent) {
        bufferInitSize = maxContent;
    }
    final ByteArrayBuffer buffer = new ByteArrayBuffer(bufferInitSize);
    final byte[] tmp = new byte[4096];
    int lengthRead;
    while ((lengthRead = instream.read(tmp)) != -1) {
        // check whether we need to trim
        if (maxContent != -1 && buffer.length() + lengthRead > maxContent) {
            buffer.append(tmp, 0, maxContent - buffer.length());
            trimmed.setValue(true);
            break;
        }
        buffer.append(tmp, 0, lengthRead);
    }
    return buffer.toByteArray();
}
 
Developer: DigitalPebble, Project: storm-crawler, Lines: 37, Source: HttpProtocol.java

Example 7: allocSlot

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
/**
 * Allocate a new shared memory slot connected to this datanode.
 *
 * Must be called with the EndpointShmManager lock held.
 *
 * @param peer          The peer to use to talk to the DataNode.
 * @param usedPeer      (out param) Will be set to true if we used the peer.
 *                        When a peer is used
 *
 * @param clientName    The client name.
 * @param blockId       The block ID to use.
 * @return              null if the DataNode does not support shared memory
 *                        segments, or experienced an error creating the
 *                        shm.  The shared memory segment itself on success.
 * @throws IOException  If there was an error communicating over the socket.
 */
Slot allocSlot(DomainPeer peer, MutableBoolean usedPeer,
    String clientName, ExtendedBlockId blockId) throws IOException {
  while (true) {
    if (closed) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": the DfsClientShmManager has been closed.");
      }
      return null;
    }
    if (disabled) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": shared memory segment access is disabled.");
      }
      return null;
    }
    // Try to use an existing slot.
    Slot slot = allocSlotFromExistingShm(blockId);
    if (slot != null) {
      return slot;
    }
    // There are no free slots.  If someone is loading more slots, wait
    // for that to finish.
    if (loading) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": waiting for loading to finish...");
      }
      finishedLoading.awaitUninterruptibly();
    } else {
      // Otherwise, load the slot ourselves.
      loading = true;
      lock.unlock();
      DfsClientShm shm;
      try {
        shm = requestNewShm(clientName, peer);
        if (shm == null) continue;
        // See #{DfsClientShmManager#domainSocketWatcher} for details
        // about why we do this before retaking the manager lock.
        domainSocketWatcher.add(peer.getDomainSocket(), shm);
        // The DomainPeer is now our responsibility, and should not be
        // closed by the caller.
        usedPeer.setValue(true);
      } finally {
        lock.lock();
        loading = false;
        finishedLoading.signalAll();
      }
      if (shm.isDisconnected()) {
        // If the peer closed immediately after the shared memory segment
        // was created, the DomainSocketWatcher callback might already have
        // fired and marked the shm as disconnected.  In this case, we
        // obviously don't want to add the SharedMemorySegment to our list
        // of valid not-full segments.
        if (LOG.isDebugEnabled()) {
          LOG.debug(this + ": the UNIX domain socket associated with " +
              "this short-circuit memory closed before we could make " +
              "use of the shm.");
        }
      } else {
        notFull.put(shm.getShmId(), shm);
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 80, Source: DfsClientShmManager.java

Example 8: test

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
@Test
public void test() throws IOException, InterruptedException {
  final HRegionServer rs = testUtil.getRSForFirstRegionInTable(tableName);
  final HRegion region = (HRegion) rs.getOnlineRegions(tableName).get(0);
  HRegion spiedRegion = spy(region);
  final MutableBoolean flushed = new MutableBoolean(false);
  final MutableBoolean reported = new MutableBoolean(false);
  doAnswer(new Answer<FlushResult>() {
    @Override
    public FlushResult answer(InvocationOnMock invocation) throws Throwable {
      synchronized (flushed) {
        flushed.setValue(true);
        flushed.notifyAll();
      }
      synchronized (reported) {
        while (!reported.booleanValue()) {
          reported.wait();
        }
      }
      rs.getWAL(region.getRegionInfo()).abortCacheFlush(
        region.getRegionInfo().getEncodedNameAsBytes());
      throw new DroppedSnapshotException("testcase");
    }
  }).when(spiedRegion).internalFlushCacheAndCommit(Matchers.<WAL> any(),
    Matchers.<MonitoredTask> any(), Matchers.<PrepareFlushResult> any(),
    Matchers.<Collection<Store>> any());
  // Find region key; don't pick up key for hbase:meta by mistake.
  String key = null;
  for (Map.Entry<String, Region> entry: rs.onlineRegions.entrySet()) {
    if (entry.getValue().getRegionInfo().getTable().equals(this.tableName)) {
      key = entry.getKey();
      break;
    }
  }
  rs.onlineRegions.put(key, spiedRegion);
  Connection conn = testUtil.getConnection();

  try (Table table = conn.getTable(tableName)) {
    table.put(new Put(Bytes.toBytes("row0")).addColumn(family, qualifier, Bytes.toBytes("val0")));
  }
  long oldestSeqIdOfStore = region.getOldestSeqIdOfStore(family);
  LOG.info("CHANGE OLDEST " + oldestSeqIdOfStore);
  assertTrue(oldestSeqIdOfStore > HConstants.NO_SEQNUM);
  rs.cacheFlusher.requestFlush(spiedRegion, false);
  synchronized (flushed) {
    while (!flushed.booleanValue()) {
      flushed.wait();
    }
  }
  try (Table table = conn.getTable(tableName)) {
    table.put(new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, Bytes.toBytes("val1")));
  }
  long now = EnvironmentEdgeManager.currentTime();
  rs.tryRegionServerReport(now - 500, now);
  synchronized (reported) {
    reported.setValue(true);
    reported.notifyAll();
  }
  while (testUtil.getRSForFirstRegionInTable(tableName) == rs) {
    Thread.sleep(100);
  }
  try (Table table = conn.getTable(tableName)) {
    Result result = table.get(new Get(Bytes.toBytes("row0")));
    assertArrayEquals(Bytes.toBytes("val0"), result.getValue(family, qualifier));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 67, Source: TestSplitWalDataLoss.java

Example 9: testFlushSequenceIdIsGreaterThanAllEditsInHFile

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
/**
 * Test flush for sure has a sequence id that is beyond the last edit appended.  We do this
 * by slowing appends in the background ring buffer thread while in foreground we call
 * flush.  The addition of the sync over HRegion in flush should fix an issue where flush was
 * returning before all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see HBASE-11109
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
  String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
  final TableName tableName = TableName.valueOf(testName);
  final HRegionInfo hri = new HRegionInfo(tableName);
  final byte[] rowName = tableName.getName();
  final HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
    TEST_UTIL.getConfiguration(), htd);
  HRegion.closeHRegion(r);
  final int countPerFamily = 10;
  final MutableBoolean goslow = new MutableBoolean(false);
  // subclass and doctor a method.
  FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDefaultRootDirPath(),
      testName, conf) {
    @Override
    void atHeadOfRingBufferEventHandlerAppend() {
      if (goslow.isTrue()) {
        Threads.sleep(100);
        LOG.debug("Sleeping before appending 100ms");
      }
      super.atHeadOfRingBufferEventHandlerAppend();
    }
  };
  HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
    TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
  EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
  try {
    List<Put> puts = null;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      puts =
        TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
    }

    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());

    // Construct a WALEdit and add it a few times to the WAL.
    WALEdit edits = new WALEdit();
    for (Put p: puts) {
      CellScanner cs = p.cellScanner();
      while (cs.advance()) {
        edits.add(cs.current());
      }
    }
    // Add any old cluster id.
    List<UUID> clusterIds = new ArrayList<UUID>();
    clusterIds.add(UUID.randomUUID());
    // Now make appends run slow.
    goslow.setValue(true);
    for (int i = 0; i < countPerFamily; i++) {
      final HRegionInfo info = region.getRegionInfo();
      final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC());
      wal.append(htd, info, logkey, edits, true);
    }
    region.flush(true);
    // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
    long currentSequenceId = region.getSequenceId();
    // Now release the appends
    goslow.setValue(false);
    synchronized (goslow) {
      goslow.notifyAll();
    }
    assertTrue(currentSequenceId >= region.getSequenceId());
  } finally {
    region.close(true);
    wal.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 82, Source: TestFSHLog.java

Example 10: allocSlot

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
/**
 * Allocate a new shared memory slot connected to this datanode.
 *
 * Must be called with the EndpointShmManager lock held.
 *
 * @param peer          The peer to use to talk to the DataNode.
 * @param usedPeer      (out param) Will be set to true if we used the peer.
 *                        When a peer is used
 *
 * @param clientName    The client name.
 * @param blockId       The block ID to use.
 * @return              null if the DataNode does not support shared memory
 *                        segments, or experienced an error creating the
 *                        shm.  The shared memory segment itself on success.
 * @throws IOException  If there was an error communicating over the socket.
 */
Slot allocSlot(DomainPeer peer, MutableBoolean usedPeer,
    String clientName, ExtendedBlockId blockId) throws IOException {
  while (true) {
    if (closed) {
      LOG.trace("{}: the DfsClientShmManager has been closed.", this);
      return null;
    }
    if (disabled) {
      LOG.trace("{}: shared memory segment access is disabled.", this);
      return null;
    }
    // Try to use an existing slot.
    Slot slot = allocSlotFromExistingShm(blockId);
    if (slot != null) {
      return slot;
    }
    // There are no free slots.  If someone is loading more slots, wait
    // for that to finish.
    if (loading) {
      LOG.trace("{}: waiting for loading to finish...", this);
      finishedLoading.awaitUninterruptibly();
    } else {
      // Otherwise, load the slot ourselves.
      loading = true;
      lock.unlock();
      DfsClientShm shm;
      try {
        shm = requestNewShm(clientName, peer);
        if (shm == null) continue;
        // See #{DfsClientShmManager#domainSocketWatcher} for details
        // about why we do this before retaking the manager lock.
        domainSocketWatcher.add(peer.getDomainSocket(), shm);
        // The DomainPeer is now our responsibility, and should not be
        // closed by the caller.
        usedPeer.setValue(true);
      } finally {
        lock.lock();
        loading = false;
        finishedLoading.signalAll();
      }
      if (shm.isDisconnected()) {
        // If the peer closed immediately after the shared memory segment
        // was created, the DomainSocketWatcher callback might already have
        // fired and marked the shm as disconnected.  In this case, we
        // obviously don't want to add the SharedMemorySegment to our list
        // of valid not-full segments.
        LOG.debug("{}: the UNIX domain socket associated with this "
            + "short-circuit memory closed before we could make use of "
            + "the shm.", this);
      } else {
        notFull.put(shm.getShmId(), shm);
      }
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 72, Source: DfsClientShmManager.java

Example 11: doRepay

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
private Transfer doRepay(final RepayLoanDTO params) {
    BigDecimal amount = params.getAmount();

    // Check if the amount is valid
    if (amount.compareTo(paymentService.getMinimumPayment()) < 0) {
        throw new ValidationException("amount", "loan.amount", new InvalidError());
    }

    // Get the loan payment to repay
    Calendar date = params.getDate();
    if (date == null) {
        date = Calendar.getInstance();
        params.setDate(date);
    }
    final LoanRepaymentAmountsDTO amountsDTO = getLoanPaymentAmount(params);
    final LoanPayment payment = amountsDTO.getLoanPayment();
    if (payment == null) {
        throw new UnexpectedEntityException();
    }

    // Validate the amount
    final BigDecimal remainingAmount = amountsDTO.getRemainingAmountAtDate();
    final BigDecimal diff = remainingAmount.subtract(amount);
    final MutableBoolean totallyRepaid = new MutableBoolean();
    // If the amount is on an acceptable delta, set the transfer value = parcel value
    if (diff.abs().floatValue() < PRECISION_DELTA) {
        amount = remainingAmount;
        totallyRepaid.setValue(true);
    } else if (diff.compareTo(BigDecimal.ZERO) < 0 || !params.getLoan().getTransfer().getType().getLoan().getType().allowsPartialRepayments()) {
        throw new ValidationException("amount", "loan.amount", new InvalidError());
    }
    final LocalSettings localSettings = settingsService.getLocalSettings();
    Loan loan = fetchService.fetch(params.getLoan(), Loan.Relationships.PAYMENTS, RelationshipHelper.nested(Loan.Relationships.TRANSFER, Payment.Relationships.TO, MemberAccount.Relationships.MEMBER), Loan.Relationships.TO_MEMBERS);

    // Build the transfers for repayment
    final List<TransferDTO> transfers = handlersByType.get(loan.getParameters().getType()).buildTransfersForRepayment(params, amountsDTO);
    Transfer root = null;
    BigDecimal totalAmount = BigDecimal.ZERO;
    for (final TransferDTO dto : transfers) {
        if (dto.getAmount().floatValue() < PRECISION_DELTA) {
            // If the root amount is zero, it means that the parent transfer should be the last transfer for this loan payment
            final TransferQuery tq = new TransferQuery();
            tq.setLoanPayment(payment);
            tq.setReverseOrder(true);
            tq.setUniqueResult();
            final List<Transfer> paymentTransfers = paymentService.search(tq);
            if (paymentTransfers.isEmpty()) {
                throw new IllegalStateException("The root transfer has amount 0 and there is no other transfers for this payment");
            }
            root = paymentTransfers.iterator().next();
        } else {
            totalAmount = totalAmount.add(dto.getAmount());
            dto.setParent(root);
            dto.setLoanPayment(payment);
            final Transfer transfer = (Transfer) paymentService.insertWithoutNotification(dto);
            if (root == null) {
                // The first will be the root. All others are it's children
                root = transfer;
            }
        }
    }

    // Update the loan payment
    final BigDecimal totalRepaid = localSettings.round(payment.getRepaidAmount().add(totalAmount));
    payment.setRepaidAmount(totalRepaid);
    if (totallyRepaid.booleanValue()) {
        // Mark the payment as repaid, if is the case
        payment.setStatus(LoanPayment.Status.REPAID);
        payment.setRepaymentDate(params.getDate());
    }
    payment.setTransfers(null); // Avoid 2 representations of the transfers collection. It's inverse="true", no problem setting null
    loanPaymentDao.update(payment);

    // Return the generated root transfer
    return root;
}
 
Developer: mateli, Project: OpenCyclos, Lines: 77, Source: LoanServiceImpl.java

Example 12: cancelScheduledPaymentsAndNotify

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
@Override
public void cancelScheduledPaymentsAndNotify(final Member member, final Collection<MemberAccountType> accountTypes) {
    List<ScheduledPayment> scheduledPayments = scheduledPaymentDao.getUnrelatedPendingPayments(member, accountTypes);
    final Set<Member> membersToNotify = new HashSet<Member>();
    final Set<MemberAccountType> removedAccounts = new HashSet<MemberAccountType>();

    // this flag is true if the member was not removed and at least on of the incoming payment should notify the receiver (in this case the
    // member)
    // or is from an invoice or there is at least one outgoing payment (the member is the payer)
    final MutableBoolean notifyMember = new MutableBoolean(false);
    for (ScheduledPayment scheduledPayment : scheduledPayments) {
        cancel(scheduledPayment);

        boolean incoming = member.equals(scheduledPayment.getToOwner());
        if (incoming) { // member is the receiver then notify the payer
            if (scheduledPayment.getFromOwner() instanceof Member) { // there is not notification for incoming system payments
                Member payer = (Member) scheduledPayment.getFromOwner();
                membersToNotify.add(payer);
                if (!member.getGroup().isRemoved() && !notifyMember.booleanValue()) {
                    notifyMember.setValue(scheduledPayment.isShowToReceiver() || isFromInvoice(scheduledPayment));
                }
                removedAccounts.add((MemberAccountType) scheduledPayment.getTo().getType());
            }
        } else { // outgoing (member is the payer)
            if (scheduledPayment.getToOwner() instanceof Member) { // there is not notification for outgoing system payments
                if (scheduledPayment.isShowToReceiver() || isFromInvoice(scheduledPayment)) {
                    Member receiver = (Member) scheduledPayment.getToOwner();
                    membersToNotify.add(receiver);
                }
                removedAccounts.add((MemberAccountType) scheduledPayment.getFrom().getType());
            }
            if (!member.getGroup().isRemoved()) {
                notifyMember.setValue(true);
            }
        }
    }

    if (!scheduledPayments.isEmpty()) {
        CurrentTransactionData.addTransactionCommitListener(new TransactionCommitListener() {
            @Override
            public void onTransactionCommit() {
                transactionHelper.runInCurrentThread(new TransactionCallbackWithoutResult() {
                    @Override
                    protected void doInTransactionWithoutResult(final TransactionStatus status) {
                        memberNotificationHandler.scheduledPaymentsCancelledNotification(member, notifyMember.booleanValue(), membersToNotify, removedAccounts);
                    }
                });
            }
        });
    }
}
 
Developer: mateli, Project: OpenCyclos, Lines: 52, Source: ScheduledPaymentServiceImpl.java

Example 13: getResults

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
private String getResults(ResultSet resultSet, boolean isTableType, MutableBoolean isComplete)
    throws SQLException {
  ResultSetMetaData md = resultSet.getMetaData();
  StringBuilder msg;
  if (isTableType) {
    msg = new StringBuilder(TABLE_MAGIC_TAG);
  } else {
    msg = new StringBuilder();
  }

  for (int i = 1; i < md.getColumnCount() + 1; i++) {
    if (i > 1) {
      msg.append(TAB);
    }
    msg.append(replaceReservedChars(md.getColumnName(i)));
  }
  msg.append(NEWLINE);

  int displayRowCount = 0;
  while (resultSet.next()) {
    if (displayRowCount >= getMaxResult()) {
      isComplete.setValue(false);
      break;
    }
    for (int i = 1; i < md.getColumnCount() + 1; i++) {
      Object resultObject;
      String resultValue;
      resultObject = resultSet.getObject(i);
      if (resultObject == null) {
        resultValue = "null";
      } else {
        resultValue = resultSet.getString(i);
      }
      msg.append(replaceReservedChars(resultValue));
      if (i != md.getColumnCount()) {
        msg.append(TAB);
      }
    }
    msg.append(NEWLINE);
    displayRowCount++;
  }
  return msg.toString();
}
 
Developer: apache, Project: zeppelin, Lines: 44, Source: JDBCInterpreter.java

Example 14: readBlobFromRDD

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
@Override
protected MatrixBlock readBlobFromRDD(RDDObject rdd, MutableBoolean writeStatus) 
	throws IOException
{
	//note: the read of a matrix block from an RDD might trigger
	//lazy evaluation of pending transformations.
	RDDObject lrdd = rdd;

	//prepare return status (by default only collect)
	writeStatus.setValue(false);
	
	MetaDataFormat iimd = (MetaDataFormat) _metaData;
	MatrixCharacteristics mc = iimd.getMatrixCharacteristics();
	InputInfo ii = iimd.getInputInfo();
	MatrixBlock mb = null;
	try 
	{
		//prevent unnecessary collect through rdd checkpoint
		if( rdd.allowsShortCircuitCollect() ) {
			lrdd = (RDDObject)rdd.getLineageChilds().get(0);
		}
		
		//obtain matrix block from RDD
		int rlen = (int)mc.getRows();
		int clen = (int)mc.getCols();
		int brlen = (int)mc.getRowsPerBlock();
		int bclen = (int)mc.getColsPerBlock();
		long nnz = mc.getNonZeros();
		
		//guarded rdd collect 
		if( ii == InputInfo.BinaryBlockInputInfo && //guarded collect not for binary cell
			!OptimizerUtils.checkSparkCollectMemoryBudget(mc, getPinnedSize()+getBroadcastSize()) ) {
			//write RDD to hdfs and read to prevent invalid collect mem consumption 
			//note: lazy, partition-at-a-time collect (toLocalIterator) was significantly slower
			if( !MapReduceTool.existsFileOnHDFS(_hdfsFileName) ) { //prevent overwrite existing file
				long newnnz = SparkExecutionContext.writeRDDtoHDFS(lrdd, _hdfsFileName, iimd.getOutputInfo());
				_metaData.getMatrixCharacteristics().setNonZeros(newnnz);
				((RDDObject)rdd).setPending(false); //mark rdd as non-pending (for export)
				((RDDObject)rdd).setHDFSFile(true); //mark rdd as hdfs file (for restore)
				writeStatus.setValue(true);         //mark for no cache-write on read
				//note: the flag hdfsFile is actually not entirely correct because we still hold an rdd 
				//reference to the input not to an rdd of the hdfs file but the resulting behavior is correct
			}
			mb = readBlobFromHDFS(_hdfsFileName);
		}
		else if( ii == InputInfo.BinaryCellInputInfo ) {
			//collect matrix block from binary block RDD
			mb = SparkExecutionContext.toMatrixBlock(lrdd, rlen, clen, nnz);		
		}
		else {
			//collect matrix block from binary cell RDD
			mb = SparkExecutionContext.toMatrixBlock(lrdd, rlen, clen, brlen, bclen, nnz);	
		}
	}
	catch(DMLRuntimeException ex) {
		throw new IOException(ex);
	}
	
	//sanity check correct output
	if( mb == null )
		throw new IOException("Unable to load matrix from rdd.");
	
	return mb;
}
 
Developer: apache, Project: systemml, Lines: 65, Source: MatrixObject.java

Example 15: testFlushSequenceIdIsGreaterThanAllEditsInHFile

import org.apache.commons.lang.mutable.MutableBoolean; // import the package/class this method depends on
/**
 * Test flush for sure has a sequence id that is beyond the last edit appended.  We do this
 * by slowing appends in the background ring buffer thread while in foreground we call
 * flush.  The addition of the sync over HRegion in flush should fix an issue where flush was
 * returning before all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see HBASE-11109
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
  String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
  final TableName tableName = TableName.valueOf(testName);
  final HRegionInfo hri = new HRegionInfo(tableName);
  final byte[] rowName = tableName.getName();
  final HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
    TEST_UTIL.getConfiguration(), htd);
  HRegion.closeHRegion(r);
  final int countPerFamily = 10;
  final MutableBoolean goslow = new MutableBoolean(false);
  // subclass and doctor a method.
  FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDefaultRootDirPath(),
      testName, conf) {
    @Override
    void atHeadOfRingBufferEventHandlerAppend() {
      if (goslow.isTrue()) {
        Threads.sleep(100);
        LOG.debug("Sleeping before appending 100ms");
      }
      super.atHeadOfRingBufferEventHandlerAppend();
    }
  };
  HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
    TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
  EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
  try {
    List<Put> puts = null;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      puts =
        TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
    }

    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());

    // Construct a WALEdit and add it a few times to the WAL.
    WALEdit edits = new WALEdit();
    for (Put p: puts) {
      CellScanner cs = p.cellScanner();
      while (cs.advance()) {
        edits.add(cs.current());
      }
    }
    // Add any old cluster id.
    List<UUID> clusterIds = new ArrayList<UUID>();
    clusterIds.add(UUID.randomUUID());
    // Now make appends run slow.
    goslow.setValue(true);
    for (int i = 0; i < countPerFamily; i++) {
      final HRegionInfo info = region.getRegionInfo();
      final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), clusterIds, -1, -1);
      wal.append(htd, info, logkey, edits, region.getSequenceId(), true, null);
    }
    region.flushcache();
    // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
    long currentSequenceId = region.getSequenceId().get();
    // Now release the appends
    goslow.setValue(false);
    synchronized (goslow) {
      goslow.notifyAll();
    }
    assertTrue(currentSequenceId >= region.getSequenceId().get());
  } finally {
    region.close(true);
    wal.close();
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 82, Source: TestFSHLog.java


Note: The org.apache.commons.lang.mutable.MutableBoolean.setValue examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Consult each project's License before distributing or using the code, and do not reproduce this article without permission.