This page collects typical usage examples of the C++ DEBUG_ONLY macro, drawn from real-world projects. If you have been wondering what DEBUG_ONLY does, how to use it, or where to see it in real code, the hand-picked samples below may help.
In total, 15 code examples of DEBUG_ONLY are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
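A note before the examples: DEBUG_ONLY is not part of standard C++; each project defines its own variant that compiles its argument only in debug builds (HotSpot keys it off ASSERT, GT.M off DEBUG, Elemental off the absence of EL_RELEASE). The sketch below shows the common pattern; the DEBUG guard name and the checked_divide function are illustrative assumptions, not code from any project quoted below.

#include <cstdio>

// Minimal sketch of the usual definition; the guard-macro name varies by project.
#ifdef DEBUG
#  define DEBUG_ONLY(code) code     // debug build: the statement is compiled in
#else
#  define DEBUG_ONLY(code)          // release build: the statement vanishes entirely
#endif

int checked_divide(int a, int b)
{
    // The extra check costs nothing in release builds:
    DEBUG_ONLY(if (b == 0) std::fprintf(stderr, "checked_divide: b == 0\n");)
    return (b != 0) ? a / b : 0;
}

In the Elemental-style examples below, DEBUG_ONLY(CSE cse("...")) uses the same mechanism to push a call-stack entry in debug builds only; the missing trailing semicolon in those snippets suggests that library's variant of the macro expands with one included.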
Example 1: assert
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ hlt();           // empty stubs make SG sick
    return NULL;
  }

  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rbx: Method*
  // rdx: argument locator (parameter slot count, added to rsp)
  // rcx: used as temp to hold mh or receiver
  // rax, rdi: garbage temps, blown away
  Register rdx_argp   = rdx;   // argument list ptr, live on error paths
  Register rax_temp   = rax;
  Register rcx_mh     = rcx;   // MH receiver; dies quickly and is recycled
  Register rbx_method = rbx;   // eventual target of this invocation

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
    __ jcc(Assembler::equal, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task: Find out how big the argument list is.
  Address rdx_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
    __ load_sized_value(rdx_argp,
                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
  } else {
    DEBUG_ONLY(rdx_argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ movptr(rcx_mh, rdx_first_arg_addr);
    DEBUG_ONLY(rdx_argp = noreg);
  }

  // rdx_first_arg_addr is live!
  trace_method_handle_interpreter_entry(_masm, iid);

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);
  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register rcx_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
    }
    DEBUG_ONLY(rdx_argp = noreg);
    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
    __ pop(rax_temp);   // return address
    __ pop(rbx_member); // extract last argument
    __ push(rax_temp);  // re-push return address
    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
  }
  return entry_point;
}
Example 2: DEBUG_ONLY
void RowScatter
( T alpha,
  const ElementalMatrix<T>& A,
        ElementalMatrix<T>& B )
{
    DEBUG_ONLY(CSE cse("axpy_contract::RowScatter"))
    AssertSameGrids( A, B );
    if( A.Height() != B.Height() || A.Width() != B.Width() )
        LogicError("Matrix sizes did not match");
    if( !B.Participating() )
        return;

    const Int width = B.Width();
    const Int colDiff = B.ColAlign()-A.ColAlign();
    if( colDiff == 0 )
    {
        if( width == 1 )
        {
            const Int localHeight = B.LocalHeight();
            const Int portionSize = mpi::Pad( localHeight );
            //vector<T> buffer( portionSize );
            vector<T> buffer;
            buffer.reserve( portionSize );

            // Reduce to rowAlign
            const Int rowAlign = B.RowAlign();
            mpi::Reduce
            ( A.LockedBuffer(), buffer.data(), portionSize,
              rowAlign, B.RowComm() );

            if( B.RowRank() == rowAlign )
            {
                axpy::util::InterleaveMatrixUpdate
                ( alpha, localHeight, 1,
                  buffer.data(), 1, localHeight,
                  B.Buffer(),    1, B.LDim() );
            }
        }
        else
        {
            const Int rowStride = B.RowStride();
            const Int rowAlign = B.RowAlign();
            const Int localHeight = B.LocalHeight();
            const Int localWidth = B.LocalWidth();
            const Int maxLocalWidth = MaxLength(width,rowStride);
            const Int portionSize = mpi::Pad( localHeight*maxLocalWidth );
            const Int sendSize = rowStride*portionSize;

            // Pack
            //vector<T> buffer( sendSize );
            vector<T> buffer;
            buffer.reserve( sendSize );
            copy::util::RowStridedPack
            ( localHeight, width,
              rowAlign, rowStride,
              A.LockedBuffer(), A.LDim(),
              buffer.data(), portionSize );

            // Communicate
            mpi::ReduceScatter( buffer.data(), portionSize, B.RowComm() );

            // Update with our received data
            axpy::util::InterleaveMatrixUpdate
            ( alpha, localHeight, localWidth,
              buffer.data(), 1, localHeight,
              B.Buffer(),    1, B.LDim() );
        }
    }
    else
    {
#ifdef EL_UNALIGNED_WARNINGS
        if( B.Grid().Rank() == 0 )
            cerr << "Unaligned RowScatter" << endl;
#endif
        const Int colRank = B.ColRank();
        const Int colStride = B.ColStride();
        const Int sendRow = Mod( colRank+colDiff, colStride );
        const Int recvRow = Mod( colRank-colDiff, colStride );
        const Int localHeight = B.LocalHeight();
        const Int localHeightA = A.LocalHeight();

        if( width == 1 )
        {
            //vector<T> buffer( localHeight+localHeightA );
            vector<T> buffer;
            buffer.reserve( localHeight+localHeightA );
            T* sendBuf = &buffer[0];
            T* recvBuf = &buffer[localHeightA];

            // Reduce to rowAlign
            const Int rowAlign = B.RowAlign();
            mpi::Reduce
            ( A.LockedBuffer(), sendBuf, localHeightA, rowAlign, B.RowComm() );

            if( B.RowRank() == rowAlign )
            {
//......... some of this example's code has been omitted .........
Example 3: DEBUG_ONLY
BDM& BDM::operator=( const DistMatrix<T,STAR,VR,BLOCK>& A )
{
    DEBUG_ONLY(CSE cse("[STAR,VC] = [STAR,VR]"))
    LogicError("This routine is not yet written");
    return *this;
}
Example 4: UpdateImagPart
inline void
UpdateImagPart( Real& alpha, const Real& beta )
{
    DEBUG_ONLY(CallStackEntry cse("UpdateImagPart"))
    LogicError("Nonsensical update");
}
Example 5: DEBUG_ONLY
DM& DM::operator=( const DistMatrix<T,STAR,MR>& A )
{
    DEBUG_ONLY(CSE cse("[MC,MR] = [STAR,MR]"))
    copy::ColFilter( A, *this );
    return *this;
}
Example 6: GetWaitingUserCount
//......... some of this example's code has been omitted .........
                // client has a valid secure hash; add him and remove the other one
                if (thePrefs.GetVerbose())
                    AddDebugLogLine(false, CString(GetResString(IDS_SAMEUSERHASH)), client->GetUserName(), cur_client->GetUserName(), cur_client->GetUserName());
                RemoveFromWaitingQueue(pos2, true);
                if (!cur_client->socket)
                {
                    if (cur_client->Disconnected(_T("AddClientToQueue - same userhash 1")))
                        delete cur_client;
                }
            }
            else
            {
                // remove both, since we do not know which one is the bad one
                if (thePrefs.GetVerbose())
                    AddDebugLogLine(false, CString(GetResString(IDS_SAMEUSERHASH)), client->GetUserName(), cur_client->GetUserName(), _T("Both"));
                RemoveFromWaitingQueue(pos2, true);
                if (!cur_client->socket)
                {
                    if (cur_client->Disconnected(_T("AddClientToQueue - same userhash 2")))
                        delete cur_client;
                }
                return;
            }
        }
        else if (client->GetIP() == cur_client->GetIP())
        {
            // same IP, different port, different userhash
            cSameIP++;
        }
    }

    if (cSameIP >= 3)
    {
        // do not accept more than 3 clients from the same IP
        if (thePrefs.GetVerbose())
            DEBUG_ONLY( AddDebugLogLine(false, _T("%s's (%s) request to enter the queue was rejected, because of too many clients with the same IP"), client->GetUserName(), ipstr(client->GetConnectIP())) );
        return;
    }
    else if (theApp.clientlist->GetClientsFromIP(client->GetIP()) >= 3)
    {
        if (thePrefs.GetVerbose())
            DEBUG_ONLY( AddDebugLogLine(false, _T("%s's (%s) request to enter the queue was rejected, because of too many clients with the same IP (found in TrackedClientsList)"), client->GetUserName(), ipstr(client->GetConnectIP())) );
        return;
    }
    // done

    // statistic values
    CKnownFile* reqfile = theApp.sharedfiles->GetFileByID((uchar*)client->GetUploadFileID());
    if (reqfile)
        reqfile->statistic.AddRequest();

    // eMule collections bypass the queue
    if (reqfile != NULL && CCollection::HasCollectionExtention(reqfile->GetFileName()) && reqfile->GetFileSize() < MAXPRIORITYCOLL_SIZE
        && !client->IsDownloading() && client->socket != NULL && client->socket->IsConnected())
    {
        client->SetCollectionUploadSlot(true);
        RemoveFromWaitingQueue(client, true);
        AddUpNextClient(_T("Collection Priority Slot"), client);
        return;
    }
    else
        client->SetCollectionUploadSlot(false);

    // cap the list
    // the queue limit in prefs is only a soft limit; the hard limit is 25% higher, to let in powershare clients and other
    // high-ranking clients after the soft limit has been reached
    uint32 softQueueLimit = thePrefs.GetQueueSize();
    uint32 hardQueueLimit = thePrefs.GetQueueSize() + max(thePrefs.GetQueueSize()/4, 200);
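    /* Worked example (hypothetical numbers, not from the eMule sources): with a preference queue size
     * of 500, softQueueLimit = 500 and hardQueueLimit = 500 + max(500/4, 200) = 700; with a smaller
     * setting of 400, the flat minimum dominates and the hard limit becomes 400 + max(100, 200) = 600.
     */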
    // if the soft queue limit has been reached, only let in high-ranking clients
    if ((uint32)waitinglist.GetCount() >= hardQueueLimit ||
        ((uint32)waitinglist.GetCount() >= softQueueLimit &&                                // soft queue limit is reached
         (client->IsFriend() && client->GetFriendSlot()) == false &&                        // client is not a friend with a friend slot
         client->GetCombinedFilePrioAndCredit() < GetAverageCombinedFilePrioAndCredit()))   // and client has lower credits/wants a lower-prio file than the average queued client
    {
        // then block the client from getting on the queue
        return;
    }

    if (client->IsDownloading())
    {
        // he's already downloading and probably just wants another file
        if (thePrefs.GetDebugClientTCPLevel() > 0)
            DebugSend("OP__AcceptUploadReq", client);
        Packet* packet = new Packet(OP_ACCEPTUPLOADREQ, 0);
        theStats.AddUpDataOverheadFileRequest(packet->size);
        client->socket->SendPacket(packet, true);
        return;
    }

    if (waitinglist.IsEmpty() && AcceptNewClient())
    {
        AddUpNextClient(_T("Direct add with empty queue."), client);
    }
    else
    {
        waitinglist.AddTail(client);
        client->SetUploadState(US_ONUPLOADQUEUE);
        theApp.emuledlg->transferwnd->queuelistctrl.AddClient(client, true);
        theApp.emuledlg->transferwnd->ShowQueueCount(waitinglist.GetCount());
        client->SendRankingInfo();
    }
}
Example 7: gdsfilext
OS_PAGE_SIZE_DECLARE

uint4 gdsfilext(uint4 blocks, uint4 filesize, boolean_t trans_in_prog)
{
    sm_uc_ptr_t          old_base[2], mmap_retaddr;
    boolean_t            was_crit, is_mm;
    char                 buff[DISK_BLOCK_SIZE];
    int                  result, save_errno, status;
    uint4                new_bit_maps, bplmap, map, new_blocks, new_total, max_tot_blks;
    uint4                jnl_status, to_wait, to_msg, wait_period;
    gtm_uint64_t         avail_blocks, mmap_sz;
    off_t                new_eof;
    trans_num            curr_tn;
    unix_db_info         *udi;
    inctn_opcode_t       save_inctn_opcode;
    int4                 prev_extend_blks_to_upgrd;
    jnl_private_control  *jpc;
    jnl_buffer_ptr_t     jbp;
    DCL_THREADGBL_ACCESS;

    assert(!IS_DSE_IMAGE);
    assert((cs_addrs->nl == NULL) || (process_id != cs_addrs->nl->trunc_pid)); /* mu_truncate shouldn't extend file... */
    assert(!process_exiting);
    DEBUG_ONLY(old_base[0] = old_base[1] = NULL);
    assert(!gv_cur_region->read_only);
    udi = FILE_INFO(gv_cur_region);
    is_mm = (dba_mm == cs_addrs->hdr->acc_meth);
#   if !defined(MM_FILE_EXT_OK)
    if (!udi->grabbed_access_sem && is_mm)
        return (uint4)(NO_FREE_SPACE); /* should this be changed to show extension not allowed ? */
#   endif
    /* Both blocks and total blocks are unsigned ints, so make sure we aren't asking for huge numbers that will
     * overflow and end up doing silly things.
     */
    assert((blocks <= (MAXTOTALBLKS(cs_data) - cs_data->trans_hist.total_blks)) || WBTEST_ENABLED(WBTEST_FILE_EXTEND_ERROR));
    if (!blocks)
        return (uint4)(NO_FREE_SPACE); /* should this be changed to show extension not enabled ? */
    bplmap = cs_data->bplmap;
    /* The new total of non-bitmap blocks will be the current number of non-bitmap blocks plus the new blocks desired.
     * There are (bplmap - 1) non-bitmap blocks per bitmap, so add (bplmap - 2) to the number of non-bitmap blocks
     * and divide by (bplmap - 1) to get the total number of bitmaps for the expanded database. (We must round up in
     * this manner, as every non-bitmap block must have an associated bitmap.)
     * The current number of bitmaps is (total number of current blocks + bplmap - 1) / bplmap.
     * Subtract the current number of bitmaps from the number needed for the expanded database to get the number of
     * new bitmaps needed.
     */
    new_bit_maps = DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks
                - DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks, bplmap) + blocks, bplmap - 1)
            - DIVIDE_ROUND_UP(cs_data->trans_hist.total_blks, bplmap);
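    /* Worked example (hypothetical numbers, not from the GT.M sources): with bplmap = 512 and
     * total_blks = 1024, the database holds ceil(1024/512) = 2 bitmaps (blocks 0 and 512), i.e.
     * 1022 non-bitmap blocks. A request for blocks = 100 gives ceil((1022 + 100)/511) - 2 = 3 - 2 = 1
     * new bitmap, so new_blocks below becomes 101. Check: the new total of 1125 blocks needs
     * ceil(1125/512) = 3 bitmaps, leaving exactly 1022 + 100 data blocks.
     */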
    new_blocks = blocks + new_bit_maps;
    assert(0 < (int)new_blocks);
    if (new_blocks + cs_data->trans_hist.total_blks > MAXTOTALBLKS(cs_data))
    {
        assert(FALSE);
        send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(1) ERR_TOTALBLKMAX);
        return (uint4)(NO_FREE_SPACE);
    }
    if (0 != (save_errno = disk_block_available(udi->fd, &avail_blocks, FALSE)))
    {
        send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), save_errno);
        rts_error_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DBFILERR, 2, DB_LEN_STR(gv_cur_region), save_errno);
    } else
    {
        if (!(gtmDebugLevel & GDL_IgnoreAvailSpace))
        {   /* Bypass this space check if the debug flag above is on. Allows us to create a large sparse DB
             * in space it could never fit in if it weren't sparse. Needed for some tests.
             */
            avail_blocks = avail_blocks / (cs_data->blk_size / DISK_BLOCK_SIZE);
            if ((blocks * EXTEND_WARNING_FACTOR) > avail_blocks)
            {
                if (blocks > (uint4)avail_blocks)
                {
                    SETUP_THREADGBL_ACCESS;
                    if (!ANTICIPATORY_FREEZE_ENABLED(cs_addrs))
                        return (uint4)(NO_FREE_SPACE);
                    else
                        send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(6) MAKE_MSG_WARNING(ERR_NOSPACEEXT), 4,
                                DB_LEN_STR(gv_cur_region), new_blocks, (uint4)avail_blocks);
                } else
                    send_msg_csa(CSA_ARG(cs_addrs) VARLSTCNT(5) ERR_DSKSPACEFLOW, 3, DB_LEN_STR(gv_cur_region),
                            (uint4)(avail_blocks - ((new_blocks <= avail_blocks) ? new_blocks : 0)));
            }
        }
    }
    /* From here on, we need to use GDSFILEXT_CLNUP before returning to the caller */
    was_crit = cs_addrs->now_crit;
    assert(!cs_addrs->hold_onto_crit || was_crit);
    /* If we are coming from mupip_extend (which gets crit itself), we had better have waited for any unfreezes
     * to occur. If we are coming from online rollback (when that feature is available), we will come in holding
     * crit and in the final retry. In that case too, we expect to have waited for unfreezes to occur in the
     * caller itself. Therefore, if we are coming in holding crit from MUPIP, we expect the db to be unfrozen,
     * so there is no need to wait for a freeze.
     * If we are coming from GT.M in the final retry (in which case we come in holding crit), we expect to have
     * waited for any unfreezes (by invoking tp_crit_all_regions) to occur (TP or non-TP) before coming into this
     * function. However, there is one exception. In the final retry, if tp_crit_all_regions notices that
     * at least one of the participating regions did ONLY READs, it will not wait for any freeze on THAT region
     * to complete before grabbing crit. Later in the final retry, if THAT region did an update which caused
     * op_tcommit to invoke bm_getfree->gdsfilext, then we would have come here with a frozen region on which
     * we hold crit.
     */
    assert(!was_crit || !cs_data->freeze || (dollar_tlevel && (CDB_STAGNATE <= t_tries)));
//......... some of this example's code has been omitted .........
Example 8: DEBUG_ONLY
inline void ComputeFactorCommMeta
( DistSymmInfo& info, bool computeFactRecvInds )
{
    DEBUG_ONLY(CallStackEntry cse("ComputeFactorCommMeta"))
    info.distNodes[0].factorMeta.Empty();
    const Int numDist = info.distNodes.size();
    for( Int s=1; s<numDist; ++s )
    {
        DistSymmNodeInfo& node = info.distNodes[s];
        const int teamSize = mpi::CommSize( node.comm );
        const DistSymmNodeInfo& childNode = info.distNodes[s-1];

        // Fill factorMeta.numChildSendInds
        FactorCommMeta& commMeta = node.factorMeta;
        commMeta.Empty();
        const int gridHeight = node.grid->Height();
        const int gridWidth = node.grid->Width();
        const int childGridHeight = childNode.grid->Height();
        const int childGridWidth = childNode.grid->Width();
        const int childGridRow = childNode.grid->Row();
        const int childGridCol = childNode.grid->Col();
        const Int mySize = childNode.size;
        const Int updateSize = childNode.lowerStruct.size();
        commMeta.numChildSendInds.resize( teamSize );
        elem::MemZero( &commMeta.numChildSendInds[0], teamSize );
        const std::vector<Int>& myRelInds =
            ( childNode.onLeft ? node.leftRelInds : node.rightRelInds );
        {
            const Int colAlign = mySize % childGridHeight;
            const Int rowAlign = mySize % childGridWidth;
            const Int colShift =
                Shift( childGridRow, colAlign, childGridHeight );
            const Int rowShift =
                Shift( childGridCol, rowAlign, childGridWidth );
            const Int localHeight =
                Length( updateSize, colShift, childGridHeight );
            const Int localWidth =
                Length( updateSize, rowShift, childGridWidth );
            for( Int jChildLoc=0; jChildLoc<localWidth; ++jChildLoc )
            {
                const Int jChild = rowShift + jChildLoc*childGridWidth;
                const int destGridCol = myRelInds[jChild] % gridWidth;

                Int localColShift;
                if( colShift > jChild )
                    localColShift = 0;
                else if( (jChild-colShift) % childGridHeight == 0 )
                    localColShift = (jChild-colShift)/childGridHeight;
                else
                    localColShift = (jChild-colShift)/childGridHeight + 1;
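                /* Hypothetical illustration of the ceiling logic above (numbers not from the source):
                 * with colShift = 1 and childGridHeight = 4, this process owns global rows 1, 5, 9, ...;
                 * for jChild = 6, (6-1) % 4 != 0, so localColShift = (6-1)/4 + 1 = 2, and local index 2
                 * is global row 9, the first locally-owned row with global index >= jChild.
                 */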
                for( Int iChildLoc=localColShift;
                     iChildLoc<localHeight; ++iChildLoc )
                {
                    const Int iChild = colShift + iChildLoc*childGridHeight;
                    if( iChild >= jChild )
                    {
                        const int destGridRow = myRelInds[iChild] % gridHeight;
                        const int destRank = destGridRow+destGridCol*gridHeight;
                        ++commMeta.numChildSendInds[destRank];
                    }
                }
            }
        }

        // Optionally compute the recv indices for the factorization.
        // This is optional since it requires a nontrivial amount of storage.
        if( computeFactRecvInds )
            ComputeFactRecvInds( node, childNode );
    }
}
Example 9: grab_gtmsource_srv_latch
/* Note: we don't increment fast_lock_count as part of getting this latch (and decrement it when releasing it) because
 * ROLLBACK can hold onto this latch for a long while and can do updates during that time, and we should NOT have a
 * non-zero fast_lock_count then, as many places like t_begin/dsk_read have asserts to this effect. It is okay NOT to
 * increment fast_lock_count, as ROLLBACK in any case has logic to disable interrupts the moment it starts doing
 * database updates.
 */
boolean_t grab_gtmsource_srv_latch(sm_global_latch_ptr_t latch, uint4 max_timeout_in_secs, uint4 onln_rlbk_action)
{
    int             spins, maxspins, retries, max_retries;
    unix_db_info    *udi;
    sgmnt_addrs     *repl_csa;
    boolean_t       cycle_mismatch;

    assert(!have_crit(CRIT_HAVE_ANY_REG));
    udi = FILE_INFO(jnlpool.jnlpool_dummy_reg);
    repl_csa = &udi->s_addrs;
    maxspins = num_additional_processors ? MAX_LOCK_SPINS(LOCK_SPINS, num_additional_processors) : 1;
    max_retries = max_timeout_in_secs * 4 * 1000; /* outer loop : X minutes, 1 loop in 4 is a sleep of 1 ms */
    for (retries = max_retries - 1; 0 < retries; retries--)
    {
        for (spins = maxspins; 0 < spins; spins--)
        {
            assert(latch->u.parts.latch_pid != process_id); /* We'd better not hold it if trying to get it */
            if (GET_SWAPLOCK(latch))
            {
                DEBUG_ONLY(locknl = repl_csa->nl); /* Use the journal pool to maintain lock history */
                LOCK_HIST("OBTN", latch, process_id, retries);
                DEBUG_ONLY(locknl = NULL);
                if (jnlpool.repl_inst_filehdr->file_corrupt && !jgbl.onlnrlbk)
                {
                    /* The journal pool indicates an abnormally terminated online rollback. Cannot continue
                     * until the rollback command is re-run to bring the journal pool/file and instance file
                     * to a consistent state.
                     */
                    /* No need to release the latch before rts_error (mupip_exit_handler will do it for us) */
                    rts_error(VARLSTCNT(8) ERR_REPLREQROLLBACK, 2, LEN_AND_STR(udi->fn),
                            ERR_TEXT, 2, LEN_AND_LIT("file_corrupt field in instance file header is set to"
                                " TRUE"));
                }
                cycle_mismatch = (repl_csa->onln_rlbk_cycle != jnlpool.jnlpool_ctl->onln_rlbk_cycle);
                assert((ASSERT_NO_ONLINE_ROLLBACK != onln_rlbk_action) || !cycle_mismatch);
                if ((HANDLE_CONCUR_ONLINE_ROLLBACK == onln_rlbk_action) && cycle_mismatch)
                {
                    assert(is_src_server);
                    SYNC_ONLN_RLBK_CYCLES;
                    gtmsource_onln_rlbk_clnup(); /* side effect : sets gtmsource_state */
                    rel_gtmsource_srv_latch(latch);
                }
                return TRUE;
            }
        }
        if (retries & 0x3)
        {   /* On all but every 4th pass, do a simple rel_quant */
            rel_quant();
        } else
        {
            /* On every 4th pass, we bide for awhile */
            wcs_sleep(LOCK_SLEEP);
            if (RETRY_CASLATCH_CUTOFF == (retries % LOCK_TRIES))
                performCASLatchCheck(latch, TRUE);
        }
    }
    DUMP_LOCKHIST();
    assert(FALSE);
    assert(jnlpool.gtmsource_local && jnlpool.gtmsource_local->gtmsource_pid);
    rts_error(VARLSTCNT(5) ERR_SRVLCKWT2LNG, 2, max_timeout_in_secs, jnlpool.gtmsource_local->gtmsource_pid);
    return FALSE; /* to keep the compiler happy */
}
Example 10: send_mesg2gtmsecshr
int send_mesg2gtmsecshr(unsigned int code, unsigned int id, char *path, int path_len)
{
    int                 client_sockfd, create_server_status, fcntl_res;
    int                 req_code, wait_count = 0;
    int                 recv_len, send_len;
    ssize_t             num_chars_recvd, num_chars_sent;
    int                 save_errno, ret_code = 0, init_ret_code = 0;
    int                 loop_count = 0;
    int                 recv_complete, send_complete;
    boolean_t           retry = FALSE;
    size_t              server_proc_len;
    int                 semop_res;
    int                 selstat, status;
    char                *recv_ptr, *send_ptr;
    struct sockaddr_un  server_proc;
    struct sembuf       sop[4];
    fd_set              wait_on_fd;
    gtmsecshr_mesg      mesg;
    TID                 timer_id;
    int4                msec_timeout;
    char                *gtm_tmp_ptr;
    struct stat         stat_buf;
    struct shmid_ds     shm_info;
    int                 len;
    DCL_THREADGBL_ACCESS;

    SETUP_THREADGBL_ACCESS;
    DBGGSSHR((LOGFLAGS, "secshr_client: New send request\n"));
    if (!gtm_dist_ok_to_use)
        rts_error_csa(CSA_ARG(NULL) VARLSTCNT(6) ERR_GTMDISTUNVERIF, 4, STRLEN(gtm_dist), gtm_dist,
                gtmImageNames[image_type].imageNameLen, gtmImageNames[image_type].imageName);
    /* Create the communication key (hash of the release name) if it has not already been done */
    if (0 == TREF(gtmsecshr_comkey))
    {
        STR_HASH((char *)gtm_release_name, gtm_release_name_len, TREF(gtmsecshr_comkey), 0);
    }
    timer_id = (TID)send_mesg2gtmsecshr;
    if (!gtmsecshr_file_check_done)
    {
        len = STRLEN(gtm_dist);
        memcpy(gtmsecshr_path, gtm_dist, len);
        gtmsecshr_path[len] = '/';
        memcpy(gtmsecshr_path + len + 1, GTMSECSHR_EXECUTABLE, STRLEN(GTMSECSHR_EXECUTABLE));
        gtmsecshr_pathname.addr = gtmsecshr_path;
        gtmsecshr_pathname.len = len + 1 + STRLEN(GTMSECSHR_EXECUTABLE);
        assertpro(GTM_PATH_MAX > gtmsecshr_pathname.len);
        gtmsecshr_pathname.addr[gtmsecshr_pathname.len] = '\0';
        if (-1 == Stat(gtmsecshr_pathname.addr, &stat_buf))
            rts_error_csa(CSA_ARG(NULL) VARLSTCNT(8) ERR_SYSCALL, 5,
                    LEN_AND_LIT("stat"), CALLFROM, errno);
        if ((ROOTUID != stat_buf.st_uid)
                || !(stat_buf.st_mode & S_ISUID)
                || (0 != ACCESS(gtmsecshr_pathname.addr, (X_OK))))
            rts_error_csa(CSA_ARG(NULL) VARLSTCNT(1) ERR_GTMSECSHRPERM);
        gtmsecshr_file_check_done = TRUE;
    }
    if (!gtmsecshr_sock_init_done && (0 < (init_ret_code = gtmsecshr_sock_init(CLIENT)))) /* Note assignment */
        return init_ret_code;
    DEBUG_ONLY(mesg.usesecshr = TREF(gtm_usesecshr)); /* Flag ignored in PRO build */
    while (MAX_COMM_ATTEMPTS >= loop_count)
    {   /* first, try the sendto */
        req_code = mesg.code = code;
        send_len = (int4)(GTM_MESG_HDR_SIZE);
        if (REMOVE_FILE == code)
        {
            assert(GTM_PATH_MAX > path_len); /* Name is not user-supplied, so a simple check suffices */
            memcpy(mesg.mesg.path, path, path_len);
            send_len += path_len;
        } else if (FLUSH_DB_IPCS_INFO == code)
        {
            assert(GTM_PATH_MAX > db_ipcs.fn_len);
            memcpy(&mesg.mesg.db_ipcs, &db_ipcs, (offsetof(ipcs_mesg, fn[0]) + db_ipcs.fn_len + 1));
            /* Most of the time the file length is much smaller than GTM_PATH_MAX */
            send_len += offsetof(ipcs_mesg, fn[0]);
            send_len += mesg.mesg.db_ipcs.fn_len + 1;
        } else
        {
            mesg.mesg.id = id;
            send_len += SIZEOF(mesg.mesg.id);
        }
        DBGGSSHR((LOGFLAGS, "secshr_client: loop %d frm-pid: %d to-pid: %d send_len: %d code: %d\n", loop_count,
                process_id, id, send_len, code));
        mesg.comkey = TREF(gtmsecshr_comkey); /* Version communication key */
        mesg.pid = process_id;                /* Process id of client */
        mesg.seqno = ++cur_seqno;
        send_ptr = (char *)&mesg;
        send_complete = FALSE;
        SENDTO_SOCK(gtmsecshr_sockfd, send_ptr, send_len, 0, (struct sockaddr *)&gtmsecshr_sock_name,
                (GTM_SOCKLEN_TYPE)gtmsecshr_sockpath_len, num_chars_sent); /* This form handles EINTR internally */
        save_errno = errno;
        DBGGSSHR((LOGFLAGS, "secshr_client: sendto rc: %d errno: %d (only important if rc=-1)\n", (int)num_chars_sent,
                save_errno));
        if (0 >= num_chars_sent)
        {   /* SENDTO_SOCK failed - start the server and attempt to resend */
            if ((EISCONN == save_errno) || (EBADF == save_errno))
            {
                gtmsecshr_sock_cleanup(CLIENT);
                gtmsecshr_sock_init(CLIENT);
                wcs_backoff(loop_count + 1);
                DBGGSSHR((LOGFLAGS, "secshr_client: Connection error, reset socket\n"));
//......... some of this example's code has been omitted .........
Example 11: ComputeMultiVecCommMeta
inline void ComputeMultiVecCommMeta( DistSymmInfo& info )
{
    DEBUG_ONLY(CallStackEntry cse("ComputeMultiVecCommMeta"))
    // Handle the interface node
    info.distNodes[0].multiVecMeta.Empty();
    info.distNodes[0].multiVecMeta.localSize = info.localNodes.back().size;

    // Handle the truly distributed nodes
    const int numDist = info.distNodes.size();
    for( int s=1; s<numDist; ++s )
    {
        DistSymmNodeInfo& node = info.distNodes[s];
        const int teamSize = mpi::CommSize( node.comm );
        const int teamRank = mpi::CommRank( node.comm );

        const DistSymmNodeInfo& childNode = info.distNodes[s-1];
        const int childTeamSize = mpi::CommSize( childNode.comm );
        const int childTeamRank = mpi::CommRank( childNode.comm );
        const bool inFirstTeam = ( childTeamRank == teamRank );
        const bool leftIsFirst = ( childNode.onLeft==inFirstTeam );
        const int leftTeamSize =
            ( childNode.onLeft ? childTeamSize : teamSize-childTeamSize );
        const int rightTeamSize = teamSize - leftTeamSize;
        const int leftTeamOff = ( leftIsFirst ? 0 : rightTeamSize );
        const int rightTeamOff = ( leftIsFirst ? leftTeamSize : 0 );
        const std::vector<Int>& myRelInds =
            ( childNode.onLeft ? node.leftRelInds : node.rightRelInds );

        // Fill numChildSendInds
        MultiVecCommMeta& commMeta = node.multiVecMeta;
        commMeta.Empty();
        commMeta.numChildSendInds.resize( teamSize );
        elem::MemZero( &commMeta.numChildSendInds[0], teamSize );
        const Int updateSize = childNode.lowerStruct.size();
        {
            const Int align = childNode.size % childTeamSize;
            const Int shift = Shift( childTeamRank, align, childTeamSize );
            const Int localHeight = Length( updateSize, shift, childTeamSize );
            for( Int iChildLoc=0; iChildLoc<localHeight; ++iChildLoc )
            {
                const Int iChild = shift + iChildLoc*childTeamSize;
                const int destRank = myRelInds[iChild] % teamSize;
                ++commMeta.numChildSendInds[destRank];
            }
        }

        const Int numLeftInds = node.leftRelInds.size();
        const Int numRightInds = node.rightRelInds.size();
        std::vector<Int> leftInds, rightInds;
        for( Int i=0; i<numLeftInds; ++i )
            if( node.leftRelInds[i] % teamSize == teamRank )
                leftInds.push_back( i );
        for( Int i=0; i<numRightInds; ++i )
            if( node.rightRelInds[i] % teamSize == teamRank )
                rightInds.push_back( i );

        //
        // Compute the solve recv indices
        //
        commMeta.childRecvInds.resize( teamSize );

        // Compute the recv indices for the left child
        const Int numLeftSolveInds = leftInds.size();
        for( Int iPre=0; iPre<numLeftSolveInds; ++iPre )
        {
            const Int iChild = leftInds[iPre];
            const Int iFront = node.leftRelInds[iChild];
            const Int iFrontLoc = (iFront-teamRank) / teamSize;
            const int childRank = (node.leftSize+iChild) % leftTeamSize;
            const int frontRank = leftTeamOff + childRank;
            commMeta.childRecvInds[frontRank].push_back(iFrontLoc);
        }

        // Compute the recv indices for the right child
        const Int numRightSolveInds = rightInds.size();
        for( Int iPre=0; iPre<numRightSolveInds; ++iPre )
        {
            const Int iChild = rightInds[iPre];
            const Int iFront = node.rightRelInds[iChild];
            const Int iFrontLoc = (iFront-teamRank) / teamSize;
            const int childRank = (node.rightSize+iChild) % rightTeamSize;
            const int frontRank = rightTeamOff + childRank;
            commMeta.childRecvInds[frontRank].push_back(iFrontLoc);
        }
        commMeta.localSize = Length(node.size,teamRank,teamSize);
    }
}
Example 12: Her
void Her( UpperOrLower uplo, Base<T> alpha, const Matrix<T>& x, Matrix<T>& A )
{
    DEBUG_ONLY(CSE cse("Her"))
    Syr( uplo, T(alpha), x, A, true );
}
Example 13: DEBUG_ONLY
BDM& BDM::operator=( const BlockMatrix<T>& A )
{
    DEBUG_ONLY(CSE cse("[CIRC,CIRC] = ABDM"))
    copy::Gather( A, *this );
    return *this;
}
Example 14: DEBUG_ONLY
DM& DM::operator=( const DistMatrix<T,STAR,VC>& A )
{
    DEBUG_ONLY(CSE cse("[STAR,STAR] = [STAR,VC]"))
    copy::RowAllGather( A, *this );
    return *this;
}
Example 15: SafeDeterminant
SafeProduct<F> SafeDeterminant( const ElementalMatrix<F>& A )
{
    DEBUG_ONLY(CSE cse("SafeDeterminant"))
    DistMatrix<F> B( A );
    return det::LUPartialPiv( B );
}