This page collects typical usage examples of the AFS_STATCNT function in C++. If you have been wondering what AFS_STATCNT does, how to call it, or what real uses of it look like, the code samples selected here should help.
Fifteen code examples of the AFS_STATCNT function are shown below, sorted by popularity by default.
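For orientation before the examples: AFS_STATCNT is the OpenAFS cache manager's per-function call counter, invoked once near the top of each instrumented routine. Its real definition lives in the OpenAFS headers and depends on whether statistics support is compiled in (note the AFS_NOSTATS guard visible in Example 8). The sketch below only illustrates the general shape of such a macro; the struct and field names are made up for illustration and are not the actual OpenAFS declarations.
/* Illustrative sketch only; hypothetical names, not the OpenAFS originals. */
struct example_call_stats {
    unsigned long C_DRead;              /* one counter per instrumented function */
    unsigned long C_afs_newslot;
    /* ... */
};
extern struct example_call_stats example_cmstats;
#ifndef AFS_NOSTATS
# define AFS_STATCNT(fn) ((example_cmstats.C_ ## fn)++)  /* bump the per-function counter */
#else
# define AFS_STATCNT(fn)                                 /* statistics disabled: compiles away */
#endif
Every example that follows opens with exactly one such AFS_STATCNT(name) call.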
Example 1: DRead
int
DRead(struct dcache *adc, int page, struct DirBuffer *entry)
{
/* Read a page from the disk. */
struct buffer *tb, *tb2;
struct osi_file *tfile;
int code;
AFS_STATCNT(DRead);
memset(entry, 0, sizeof(struct DirBuffer));
ObtainWriteLock(&afs_bufferLock, 256);
#define bufmatch(tb) (tb->page == page && tb->fid == adc->index)
#define buf_Front(head,parent,p) {(parent)->hashNext = (p)->hashNext; (p)->hashNext= *(head);*(head)=(p);}
/* this apparently-complicated-looking code is simply an example of
* a little bit of loop unrolling, and is a standard linked-list
* traversal trick. It saves a few assignments at the expense
* of larger code size. This could be simplified by better use of
* macros.
*/
if ((tb = phTable[pHash(adc->index, page)])) {
if (bufmatch(tb)) {
ObtainWriteLock(&tb->lock, 257);
tb->lockers++;
ReleaseWriteLock(&afs_bufferLock);
tb->accesstime = timecounter++;
AFS_STATS(afs_stats_cmperf.bufHits++);
ReleaseWriteLock(&tb->lock);
entry->buffer = tb;
entry->data = tb->data;
return 0;
} else {
struct buffer **bufhead;
bufhead = &(phTable[pHash(adc->index, page)]);
while ((tb2 = tb->hashNext)) {
if (bufmatch(tb2)) {
buf_Front(bufhead, tb, tb2);
ObtainWriteLock(&tb2->lock, 258);
tb2->lockers++;
ReleaseWriteLock(&afs_bufferLock);
tb2->accesstime = timecounter++;
AFS_STATS(afs_stats_cmperf.bufHits++);
ReleaseWriteLock(&tb2->lock);
entry->buffer = tb2;
entry->data = tb2->data;
return 0;
}
if ((tb = tb2->hashNext)) {
if (bufmatch(tb)) {
buf_Front(bufhead, tb2, tb);
ObtainWriteLock(&tb->lock, 259);
tb->lockers++;
ReleaseWriteLock(&afs_bufferLock);
tb->accesstime = timecounter++;
AFS_STATS(afs_stats_cmperf.bufHits++);
ReleaseWriteLock(&tb->lock);
entry->buffer = tb;
entry->data = tb->data;
return 0;
}
} else
break;
}
}
} else
tb2 = NULL;
AFS_STATS(afs_stats_cmperf.bufMisses++);
/* can't find it */
/* The last thing we looked at was either tb or tb2 (or nothing). That
* is at least the oldest buffer on one particular hash chain, so it's
* a pretty good place to start looking for the truly oldest buffer.
*/
tb = afs_newslot(adc, page, (tb ? tb : tb2));
if (!tb) {
ReleaseWriteLock(&afs_bufferLock);
return EIO;
}
ObtainWriteLock(&tb->lock, 260);
tb->lockers++;
ReleaseWriteLock(&afs_bufferLock);
if (page * AFS_BUFFER_PAGESIZE >= adc->f.chunkBytes) {
tb->fid = NULLIDX;
afs_reset_inode(&tb->inode);
tb->lockers--;
ReleaseWriteLock(&tb->lock);
return EIO;
}
tfile = afs_CFileOpen(&adc->f.inode);
code =
afs_CFileRead(tfile, tb->page * AFS_BUFFER_PAGESIZE, tb->data,
AFS_BUFFER_PAGESIZE);
afs_CFileClose(tfile);
if (code < AFS_BUFFER_PAGESIZE) {
tb->fid = NULLIDX;
afs_reset_inode(&tb->inode);
tb->lockers--;
//......... some code omitted here .........
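The hash-chain scan in DRead above is deliberately unrolled two links at a time, as its in-code comment explains. Purely as a reading aid, here is an un-unrolled sketch of the same move-to-front lookup using the bufmatch and buf_Front macros defined in DRead; the helper name is hypothetical, and the locking, access-time, and statistics updates of the real code are left out.
/* Reading aid only: single-step equivalent of the unrolled chain walk in DRead. */
static struct buffer *
lookup_page_sketch(struct buffer **bufhead, struct dcache *adc, int page)
{
    struct buffer *prev = NULL, *tb;
    for (tb = *bufhead; tb; prev = tb, tb = tb->hashNext) {
        if (bufmatch(tb)) {                     /* tb->page == page && tb->fid == adc->index */
            if (prev)
                buf_Front(bufhead, prev, tb);   /* splice the hit to the front of the chain */
            return tb;
        }
    }
    return NULL;                                /* miss: DRead falls through to afs_newslot() */
}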
Example 2: afs_TruncateAllSegments
/*
* afs_TruncateAllSegments
*
* Description:
* Truncate a cache file.
*
* Parameters:
* avc : Ptr to vcache entry to truncate.
* alen : Number of bytes to make the file.
* areq : Ptr to request structure.
*
* Environment:
* Called with avc write-locked; in VFS40 systems, pvnLock is also
* held.
*/
int
afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
struct vrequest *areq, afs_ucred_t *acred)
{
struct dcache *tdc;
afs_int32 code;
afs_int32 index;
afs_size_t newSize;
int dcCount, dcPos;
struct dcache **tdcArray = NULL;
AFS_STATCNT(afs_TruncateAllSegments);
avc->f.m.Date = osi_Time();
afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
if (alen >= avc->f.m.Length) {
/*
* Special speedup since Sun's vm extends the file this way;
* we've never written to the file thus we can just set the new
* length and avoid the needless calls below.
* Also used for ftruncate calls which can extend the file.
* To completely minimize the possible extra StoreMini RPC, we really
* should keep the ExtendedPos as well and clear this flag if we
* truncate below that value before we store the file back.
*/
avc->f.states |= CExtendedFile;
avc->f.m.Length = alen;
return 0;
}
#if (defined(AFS_SUN5_ENV))
/* Zero unused portion of last page */
osi_VM_PreTruncate(avc, alen, acred);
#endif
#if (defined(AFS_SUN5_ENV))
ObtainWriteLock(&avc->vlock, 546);
avc->activeV++; /* Block new getpages */
ReleaseWriteLock(&avc->vlock);
#endif
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
/* Flush pages beyond end-of-file. */
osi_VM_Truncate(avc, alen, acred);
AFS_GLOCK();
ObtainWriteLock(&avc->lock, 79);
avc->f.m.Length = alen;
if (alen < avc->f.truncPos)
avc->f.truncPos = alen;
code = DVHash(&avc->f.fid);
/* block out others from screwing with this table */
ObtainWriteLock(&afs_xdcache, 287);
dcCount = 0;
for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetValidDSlot(index);
if (!tdc) {
ReleaseWriteLock(&afs_xdcache);
code = EIO;
goto done;
}
ReleaseReadLock(&tdc->tlock);
if (!FidCmp(&tdc->f.fid, &avc->f.fid))
dcCount++;
afs_PutDCache(tdc);
}
index = afs_dvnextTbl[index];
}
/* Now allocate space where we can save those dcache entries, and
* do a second pass over them.. Since we're holding xdcache, it
* shouldn't be changing.
*/
tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
dcPos = 0;
//......... some code omitted here .........
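For context, the Environment note in the header comment above means a caller is expected to already hold the vcache write lock when invoking afs_TruncateAllSegments (the function itself temporarily drops and re-acquires it around the VM flush). A minimal hypothetical call site might look like this; the lock-ranking number is invented for illustration.
/* Hypothetical call site; per the header comment, avc must be write-locked. */
static int
truncate_file_sketch(struct vcache *avc, afs_size_t newLen,
                     struct vrequest *areq, afs_ucred_t *acred)
{
    afs_int32 code;
    ObtainWriteLock(&avc->lock, 999);   /* 999 is a made-up lock-ranking number */
    code = afs_TruncateAllSegments(avc, newLen, areq, acred);
    ReleaseWriteLock(&avc->lock);
    return code;
}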
Example 3: osi_UFSOpen
void *
osi_UFSOpen(afs_dcache_id_t *ainode)
{
struct vnode *vp;
struct vattr va;
struct osi_file *afile = NULL;
extern int cacheDiskType;
afs_int32 code = 0;
int dummy;
char fname[1024];
struct osi_stat tstat;
AFS_STATCNT(osi_UFSOpen);
if (cacheDiskType != AFS_FCACHE_TYPE_UFS) {
osi_Panic("UFSOpen called for non-UFS cache\n");
}
if (!afs_osicred_initialized) {
/* valid for alpha_osf, SunOS, Ultrix */
memset(&afs_osi_cred, 0, sizeof(afs_ucred_t));
afs_osi_cred.cr_ref++;
#ifndef AFS_DARWIN110_ENV
afs_osi_cred.cr_ngroups = 1;
#endif
afs_osicred_initialized = 1;
}
afile = (struct osi_file *)osi_AllocSmallSpace(sizeof(struct osi_file));
AFS_GUNLOCK();
#ifdef AFS_CACHE_VNODE_PATH
if (!ainode->ufs) {
osi_Panic("No cache inode\n");
}
code = vnode_open(ainode->ufs, O_RDWR, 0, 0, &vp, afs_osi_ctxtp);
#else
#ifndef AFS_DARWIN80_ENV
if (afs_CacheFSType == AFS_APPL_HFS_CACHE)
code = igetinode(afs_cacheVfsp, (dev_t) cacheDev.dev, &ainode->ufs, &vp, &va, &dummy); /* XXX hfs is broken */
else if (afs_CacheFSType == AFS_APPL_UFS_CACHE)
#endif
code =
igetinode(afs_cacheVfsp, (dev_t) cacheDev.dev, (ino_t) ainode->ufs,
&vp, &va, &dummy);
#ifndef AFS_DARWIN80_ENV
else
panic("osi_UFSOpen called before cacheops initialized\n");
#endif
#endif
AFS_GLOCK();
if (code) {
osi_FreeSmallSpace(afile);
osi_Panic("UFSOpen: igetinode failed");
}
afile->vnode = vp;
afile->offset = 0;
afile->proc = (int (*)())0;
#ifndef AFS_CACHE_VNODE_PATH
afile->size = va.va_size;
#else
code = afs_osi_Stat(afile, &tstat);
afile->size = tstat.size;
#endif
return (void *)afile;
}
Example 4: rxk_FreeSocket
/* free socket allocated by osi_NetSocket */
int
rxk_FreeSocket(struct socket *asocket)
{
AFS_STATCNT(osi_FreeSocket);
return 0;
}
Example 5: afs_StoreMini
static int
afs_StoreMini(struct vcache *avc, struct vrequest *areq)
{
struct afs_conn *tc;
struct AFSStoreStatus InStatus;
struct AFSFetchStatus OutStatus;
struct AFSVolSync tsync;
afs_int32 code;
struct rx_call *tcall;
struct rx_connection *rxconn;
afs_size_t tlen, xlen = 0;
XSTATS_DECLS;
AFS_STATCNT(afs_StoreMini);
afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
ICL_TYPE_INT32, avc->f.m.Length);
tlen = avc->f.m.Length;
if (avc->f.truncPos < tlen)
tlen = avc->f.truncPos;
avc->f.truncPos = AFS_NOTRUNC;
avc->f.states &= ~CExtendedFile;
do {
tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
if (tc) {
#ifdef AFS_64BIT_CLIENT
retry:
#endif
RX_AFS_GUNLOCK();
tcall = rx_NewCall(rxconn);
RX_AFS_GLOCK();
/* Set the client mod time since we always want the file
* to have the client's mod time and not the server's one
* (to avoid problems with make, etc.) It almost always
* works fine with standard afs because the server/client
* times are in sync and, more importantly, this storemini
* is a special call that would typically be followed by
* the proper store-data or store-status calls.
*/
InStatus.Mask = AFS_SETMODTIME;
InStatus.ClientModTime = avc->f.m.Date;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
&avc->f.fid.Fid, ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(tlen));
RX_AFS_GUNLOCK();
#ifdef AFS_64BIT_CLIENT
if (!afs_serverHasNo64Bit(tc)) {
code =
StartRXAFS_StoreData64(tcall,
(struct AFSFid *)&avc->f.fid.Fid,
&InStatus, avc->f.m.Length,
(afs_size_t) 0, tlen);
} else {
afs_int32 l1, l2;
l1 = avc->f.m.Length;
l2 = tlen;
if ((avc->f.m.Length > 0x7fffffff) ||
(tlen > 0x7fffffff) ||
((0x7fffffff - tlen) < avc->f.m.Length)) {
code = EFBIG;
goto error;
}
code =
StartRXAFS_StoreData(tcall,
(struct AFSFid *)&avc->f.fid.Fid,
&InStatus, l1, 0, l2);
}
#else /* AFS_64BIT_CLIENT */
code =
StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->f.fid.Fid,
&InStatus, avc->f.m.Length, 0, tlen);
#endif /* AFS_64BIT_CLIENT */
if (code == 0) {
code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
}
#ifdef AFS_64BIT_CLIENT
error:
#endif
code = rx_EndCall(tcall, code);
RX_AFS_GLOCK();
XSTATS_END_TIME;
#ifdef AFS_64BIT_CLIENT
if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
afs_serverSetNo64Bit(tc);
goto retry;
}
#endif /* AFS_64BIT_CLIENT */
} else
code = -1;
} while (afs_Analyze
(tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STOREDATA,
SHARED_LOCK, NULL));
if (code == 0)
afs_ProcessFS(avc, &OutStatus, areq);
return code;
} /*afs_StoreMini */
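One detail of the 32-bit fallback above is worth spelling out: before calling the non-64-bit StartRXAFS_StoreData, the code returns EFBIG if either value, or their sum, exceeds 2^31 - 1, and the sum test is phrased so that the check itself cannot overflow. A standalone restatement of that guard (plain arithmetic, not OpenAFS code):
/* Restatement of the EFBIG guard from the 32-bit branch of afs_StoreMini. */
static int
fits_in_31_bits(unsigned long long length, unsigned long long tlen)
{
    if (length > 0x7fffffffULL || tlen > 0x7fffffffULL)
        return 0;
    /* (0x7fffffff - tlen) < length  is equivalent to  length + tlen > 0x7fffffff,
     * but the subtraction form cannot overflow.
     * Example: length = 0x7fff0000, tlen = 0x00020000:
     *          0x7fffffff - 0x00020000 = 0x7ffdffff < 0x7fff0000  -> rejected */
    if ((0x7fffffffULL - tlen) < length)
        return 0;
    return 1;
}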
Example 6: afs_sync
int
afs_sync(struct vfs *afsp)
{
AFS_STATCNT(afs_sync);
return 0;
}
Example 7: afs_open
afs_open(struct vcache **avcp, afs_int32 aflags, afs_ucred_t *acred)
#endif
{
afs_int32 code;
struct vrequest treq;
struct vcache *tvc;
int writing;
struct afs_fakestat_state fakestate;
AFS_STATCNT(afs_open);
if ((code = afs_InitReq(&treq, acred)))
return code;
#ifdef AFS_SGI64_ENV
/* avcpp can be, but is not necessarily, bhp's vnode. */
tvc = VTOAFS(BHV_TO_VNODE(bhv));
#else
tvc = *avcp;
#endif
afs_Trace2(afs_iclSetp, CM_TRACE_OPEN, ICL_TYPE_POINTER, tvc,
ICL_TYPE_INT32, aflags);
afs_InitFakeStat(&fakestate);
AFS_DISCON_LOCK();
code = afs_EvalFakeStat(&tvc, &fakestate, &treq);
if (code)
goto done;
code = afs_VerifyVCache(tvc, &treq);
if (code)
goto done;
ObtainReadLock(&tvc->lock);
if (AFS_IS_DISCONNECTED && (afs_DCacheMissingChunks(tvc) != 0)) {
ReleaseReadLock(&tvc->lock);
/* printf("Network is down in afs_open: missing chunks\n"); */
code = ENETDOWN;
goto done;
}
ReleaseReadLock(&tvc->lock);
if (aflags & (FWRITE | FTRUNC))
writing = 1;
else
writing = 0;
if (vType(tvc) == VDIR) {
/* directory */
if (writing) {
code = EISDIR;
goto done;
} else {
if (!afs_AccessOK
(tvc, ((tvc->f.states & CForeign) ? PRSFS_READ : PRSFS_LOOKUP),
&treq, CHECK_MODE_BITS)) {
code = EACCES;
/* printf("afs_Open: no access for dir\n"); */
goto done;
}
}
} else {
#ifdef AFS_SUN5_ENV
if (AFS_NFSXLATORREQ(acred) && (aflags & FREAD)) {
if (!afs_AccessOK
(tvc, PRSFS_READ, &treq,
CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
code = EACCES;
goto done;
}
}
#endif
#ifdef AFS_AIX41_ENV
if (aflags & FRSHARE) {
/*
* Hack for AIX 4.1:
* Apparently it is possible for a file to get mapped without
* either VNOP_MAP or VNOP_RDWR being called, if (1) it is a
* sharable library, and (2) it has already been loaded. We must
* ensure that the credp is up to date. We detect the situation
* by checking for O_RSHARE at open time.
*/
/*
* We keep the caller's credentials since an async daemon will
* handle the request at some point. We assume that the same
* credentials will be used.
*/
ObtainWriteLock(&tvc->lock, 140);
if (!tvc->credp || (tvc->credp != acred)) {
crhold(acred);
if (tvc->credp) {
struct ucred *crp = tvc->credp;
tvc->credp = NULL;
crfree(crp);
}
tvc->credp = acred;
}
ReleaseWriteLock(&tvc->lock);
}
#endif
/* normal file or symlink */
//......... some code omitted here .........
Example 8: afspag_PSetTokens
int afspag_PSetTokens(char *ain, afs_int32 ainSize, struct AFS_UCRED **acred)
{
afs_int32 i;
register struct unixuser *tu;
struct afspag_cell *tcell;
struct ClearToken clear;
char *stp;
int stLen;
afs_int32 flag, set_parent_pag = 0;
afs_int32 pag, uid;
AFS_STATCNT(PSetTokens);
if (!afs_resourceinit_flag) {
return EIO;
}
memcpy((char *)&i, ain, sizeof(afs_int32));
ain += sizeof(afs_int32);
stp = ain; /* remember where the ticket is */
if (i < 0 || i > MAXKTCTICKETLEN)
return EINVAL; /* malloc may fail */
stLen = i;
ain += i; /* skip over ticket */
memcpy((char *)&i, ain, sizeof(afs_int32));
ain += sizeof(afs_int32);
if (i != sizeof(struct ClearToken)) {
return EINVAL;
}
memcpy((char *)&clear, ain, sizeof(struct ClearToken));
if (clear.AuthHandle == -1)
clear.AuthHandle = 999; /* more rxvab compat stuff */
ain += sizeof(struct ClearToken);
if (ainSize != 2 * sizeof(afs_int32) + stLen + sizeof(struct ClearToken)) {
/* still stuff left? we've got primary flag and cell name. Set these */
memcpy((char *)&flag, ain, sizeof(afs_int32)); /* primary id flag */
ain += sizeof(afs_int32); /* skip id field */
/* rest is cell name, look it up */
/* some versions of gcc appear to need != 0 in order to get this right */
if ((flag & 0x8000) != 0) { /* XXX Use Constant XXX */
flag &= ~0x8000;
set_parent_pag = 1;
}
tcell = afspag_GetCell(ain);
} else {
/* default to primary cell, primary id */
flag = 1; /* primary id */
tcell = afspag_GetPrimaryCell();
}
if (!tcell) return ESRCH;
if (set_parent_pag) {
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
#if defined(AFS_DARWIN_ENV)
struct proc *p = current_proc(); /* XXX */
#else
struct proc *p = curproc; /* XXX */
#endif
#ifndef AFS_DARWIN80_ENV
uprintf("Process %d (%s) tried to change pags in PSetTokens\n",
p->p_pid, p->p_comm);
#endif
setpag(p, acred, -1, &pag, 1);
#else
#ifdef AFS_OSF_ENV
setpag(u.u_procp, acred, -1, &pag, 1); /* XXX u.u_procp is a no-op XXX */
#else
setpag(acred, -1, &pag, 1);
#endif
#endif
}
pag = PagInCred(*acred);
uid = (pag == NOPAG) ? (*acred)->cr_uid : pag;
/* now we just set the tokens */
tu = afs_GetUser(uid, tcell->cellnum, WRITE_LOCK);
if (!tu->cellinfo)
tu->cellinfo = (void *)tcell;
tu->vid = clear.ViceId;
if (tu->stp != NULL) {
afs_osi_Free(tu->stp, tu->stLen);
}
tu->stp = (char *)afs_osi_Alloc(stLen);
tu->stLen = stLen;
memcpy(tu->stp, stp, stLen);
tu->ct = clear;
#ifndef AFS_NOSTATS
afs_stats_cmfullperf.authent.TicketUpdates++;
afs_ComputePAGStats();
#endif /* AFS_NOSTATS */
tu->states |= UHasTokens;
tu->states &= ~UTokensBad;
afs_SetPrimary(tu, flag);
tu->tokenTime = osi_Time();
afs_PutUser(tu, WRITE_LOCK);
return 0;
}
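afspag_PSetTokens parses its ain buffer field by field with memcpy. As a reading aid, here is a hedged sketch of how a caller would have to lay that buffer out; the helper name is hypothetical, the OpenAFS types (afs_int32, struct ClearToken) are assumed to be in scope, and memcpy/strcpy/strlen stand in for whatever the real caller uses. The field order and the 0x8000 "set parent PAG" flag come directly from the parsing code above.
/* Illustrative packing of the buffer consumed by afspag_PSetTokens:
 *   [ticket length][ticket bytes][sizeof(struct ClearToken)][ClearToken]
 *   optionally followed by [primary flag][NUL-terminated cell name]      */
static afs_int32
pack_settokens_sketch(char *buf, const char *ticket, afs_int32 ticketLen,
                      const struct ClearToken *clear, afs_int32 primaryFlag,
                      const char *cellName)
{
    char *p = buf;
    afs_int32 n = (afs_int32)sizeof(struct ClearToken);
    memcpy(p, &ticketLen, sizeof(afs_int32));        p += sizeof(afs_int32);
    memcpy(p, ticket, ticketLen);                    p += ticketLen;
    memcpy(p, &n, sizeof(afs_int32));                p += sizeof(afs_int32);
    memcpy(p, clear, sizeof(struct ClearToken));     p += sizeof(struct ClearToken);
    if (cellName) {
        /* OR 0x8000 into primaryFlag to request a new parent PAG, per the parser */
        memcpy(p, &primaryFlag, sizeof(afs_int32));  p += sizeof(afs_int32);
        strcpy(p, cellName);                         p += strlen(cellName) + 1;
    }
    return (afs_int32)(p - buf);                     /* pass this as ainSize */
}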
Example 9: afs_Analyze
/*------------------------------------------------------------------------
* EXPORTED afs_Analyze
*
* Description:
* Analyze the outcome of an RPC operation, taking whatever support
* actions are necessary.
*
* Arguments:
* aconn : Ptr to the relevant connection on which the call was made.
* acode : The return code experienced by the RPC.
* afid : The FID of the file involved in the action. This argument
* may be null if none was involved.
* areq : The request record associated with this operation.
* op : which RPC we are analyzing.
* cellp : pointer to a cell struct. Must provide either fid or cell.
*
* Returns:
* Non-zero value if the related RPC operation should be retried,
* zero otherwise.
*
* Environment:
* This routine is typically called in a do-while loop, causing the
* embedded RPC operation to be called repeatedly if appropriate
* until whatever error condition (if any) is intolerable.
*
* Side Effects:
* As advertised.
*
* NOTE:
* The retry return value is used by afs_StoreAllSegments to determine
* if this is a temporary or permanent error.
*------------------------------------------------------------------------*/
int
afs_Analyze(register struct afs_conn *aconn, afs_int32 acode,
struct VenusFid *afid, register struct vrequest *areq, int op,
afs_int32 locktype, struct cell *cellp)
{
afs_int32 i;
struct srvAddr *sa;
struct server *tsp;
struct volume *tvp;
afs_int32 shouldRetry = 0;
afs_int32 serversleft = 1;
struct afs_stats_RPCErrors *aerrP;
afs_int32 markeddown;
if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
/* On reconnection, act as connected. XXX: for now.... */
/* SXW - This may get very tired after a while. We should try and
* intercept all RPCs before they get here ... */
/*printf("afs_Analyze: disconnected\n");*/
afs_FinalizeReq(areq);
if (aconn) {
/* SXW - I suspect that this will _never_ happen - we shouldn't
* get a connection because we're disconnected !!!*/
afs_PutConn(aconn, locktype);
}
return 0;
}
AFS_STATCNT(afs_Analyze);
afs_Trace4(afs_iclSetp, CM_TRACE_ANALYZE, ICL_TYPE_INT32, op,
ICL_TYPE_POINTER, aconn, ICL_TYPE_INT32, acode, ICL_TYPE_LONG,
areq->uid);
aerrP = (struct afs_stats_RPCErrors *)0;
if ((op >= 0) && (op < AFS_STATS_NUM_FS_RPC_OPS))
aerrP = &(afs_stats_cmfullperf.rpc.fsRPCErrors[op]);
afs_FinalizeReq(areq);
if (!aconn && areq->busyCount) { /* one RPC or more got VBUSY/VRESTARTING */
tvp = afs_FindVolume(afid, READ_LOCK);
if (tvp) {
afs_warnuser("afs: Waiting for busy volume %u (%s) in cell %s\n",
(afid ? afid->Fid.Volume : 0),
(tvp->name ? tvp->name : ""),
((tvp->serverHost[0]
&& tvp->serverHost[0]->cell) ? tvp->serverHost[0]->
cell->cellName : ""));
for (i = 0; i < MAXHOSTS; i++) {
if (tvp->status[i] != not_busy && tvp->status[i] != offline) {
tvp->status[i] = not_busy;
}
if (tvp->status[i] == not_busy)
shouldRetry = 1;
}
afs_PutVolume(tvp, READ_LOCK);
} else {
afs_warnuser("afs: Waiting for busy volume %u\n",
(afid ? afid->Fid.Volume : 0));
}
if (areq->busyCount > 100) {
if (aerrP)
(aerrP->err_Volume)++;
//......... some code omitted here .........
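As the header comment says, afs_Analyze is designed to sit at the bottom of a do-while loop around an RPC, deciding whether the call should be retried against another server. The skeleton below is distilled from the call sites in Examples 5 and 10 on this page; SomeRXAFS_Operation is a placeholder, and note that those call sites pass an rx_connection argument that the prototype shown in this example does not declare, so the snippets apparently come from different OpenAFS revisions; the sketch follows the call-site form.
/* Pattern sketch of the afs_Analyze retry loop (see Examples 5 and 10 for real uses). */
static int
retry_rpc_sketch(struct vcache *avc, struct vrequest *areq)
{
    struct afs_conn *tc;
    struct rx_connection *rxconn;
    afs_int32 code;
    do {
        tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);  /* pick a live server */
        if (tc) {
            RX_AFS_GUNLOCK();                    /* drop the global lock around the RPC */
            code = SomeRXAFS_Operation(rxconn);  /* hypothetical placeholder RPC */
            RX_AFS_GLOCK();
        } else
            code = -1;                           /* no usable connection */
    } while (afs_Analyze(tc, rxconn, code, &avc->f.fid, areq,
                         AFS_STATS_FS_RPCIDX_STOREDATA, SHARED_LOCK, NULL));
    return code;                                 /* 0 once the RPC has succeeded */
}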
Example 10: VLDB_Same
static int
VLDB_Same(struct VenusFid *afid, struct vrequest *areq)
{
struct vrequest treq;
struct afs_conn *tconn;
int i, type = 0;
union {
struct vldbentry tve;
struct nvldbentry ntve;
struct uvldbentry utve;
} *v;
struct volume *tvp;
struct cell *tcell;
char *bp, tbuf[CVBS]; /* biggest volume id is 2^32, ~ 4*10^9 */
unsigned int changed;
struct server *(oldhosts[NMAXNSERVERS]);
AFS_STATCNT(CheckVLDB);
afs_FinalizeReq(areq);
if ((i = afs_InitReq(&treq, afs_osi_credp)))
return DUNNO;
v = afs_osi_Alloc(sizeof(*v));
tcell = afs_GetCell(afid->Cell, READ_LOCK);
bp = afs_cv2string(&tbuf[CVBS], afid->Fid.Volume);
do {
VSleep(2); /* Better safe than sorry. */
tconn =
afs_ConnByMHosts(tcell->cellHosts, tcell->vlport, tcell->cellNum,
&treq, SHARED_LOCK);
if (tconn) {
if (tconn->srvr->server->flags & SNO_LHOSTS) {
type = 0;
RX_AFS_GUNLOCK();
i = VL_GetEntryByNameO(tconn->id, bp, &v->tve);
RX_AFS_GLOCK();
} else if (tconn->srvr->server->flags & SYES_LHOSTS) {
type = 1;
RX_AFS_GUNLOCK();
i = VL_GetEntryByNameN(tconn->id, bp, &v->ntve);
RX_AFS_GLOCK();
} else {
type = 2;
RX_AFS_GUNLOCK();
i = VL_GetEntryByNameU(tconn->id, bp, &v->utve);
RX_AFS_GLOCK();
if (!(tconn->srvr->server->flags & SVLSRV_UUID)) {
if (i == RXGEN_OPCODE) {
type = 1;
RX_AFS_GUNLOCK();
i = VL_GetEntryByNameN(tconn->id, bp, &v->ntve);
RX_AFS_GLOCK();
if (i == RXGEN_OPCODE) {
type = 0;
tconn->srvr->server->flags |= SNO_LHOSTS;
RX_AFS_GUNLOCK();
i = VL_GetEntryByNameO(tconn->id, bp, &v->tve);
RX_AFS_GLOCK();
} else if (!i)
tconn->srvr->server->flags |= SYES_LHOSTS;
} else if (!i)
tconn->srvr->server->flags |= SVLSRV_UUID;
}
lastcode = i;
}
} else
i = -1;
} while (afs_Analyze(tconn, i, NULL, &treq, -1, /* no op code for this */
SHARED_LOCK, tcell));
afs_PutCell(tcell, READ_LOCK);
afs_Trace2(afs_iclSetp, CM_TRACE_CHECKVLDB, ICL_TYPE_FID, &afid,
ICL_TYPE_INT32, i);
if (i) {
afs_osi_Free(v, sizeof(*v));
return DUNNO;
}
/* have info, copy into serverHost array */
changed = 0;
tvp = afs_FindVolume(afid, WRITE_LOCK);
if (tvp) {
ObtainWriteLock(&tvp->lock, 107);
for (i = 0; i < NMAXNSERVERS && tvp->serverHost[i]; i++) {
oldhosts[i] = tvp->serverHost[i];
}
if (type == 2) {
InstallUVolumeEntry(tvp, &v->utve, afid->Cell, tcell, &treq);
} else if (type == 1) {
InstallNVolumeEntry(tvp, &v->ntve, afid->Cell);
} else {
InstallVolumeEntry(tvp, &v->tve, afid->Cell);
}
if (i < NMAXNSERVERS && tvp->serverHost[i]) {
changed = 1;
}
for (--i; !changed && i >= 0; i--) {
if (tvp->serverHost[i] != oldhosts[i]) {
//......... some code omitted here .........
Example 11: afs_newslot
/* lp is pointer to a fairly-old buffer */
static struct buffer *
afs_newslot(struct dcache *adc, afs_int32 apage, struct buffer *lp)
{
/* Find a usable buffer slot */
afs_int32 i;
afs_int32 lt = 0;
struct buffer *tp;
struct osi_file *tfile;
AFS_STATCNT(afs_newslot);
/* we take a pointer here to a buffer which was at the end of an
* LRU hash chain. Odds are, it's one of the older buffers, not
* one of the newer. Having an older buffer to start with may
* permit us to avoid a few of the assignments in the "typical
* case" for loop below.
*/
if (lp && (lp->lockers == 0)) {
lt = lp->accesstime;
} else {
lp = NULL;
}
/* timecounter might have wrapped, if machine is very very busy
* and stays up for a long time. Timecounter mustn't wrap twice
* (positive->negative->positive) before calling newslot, but that
* would require 2 billion consecutive cache hits... Anyway, the
* penalty is only that the cache replacement policy will be
* almost MRU for the next ~2 billion DReads... newslot doesn't
* get called nearly as often as DRead, so in order to avoid the
* performance penalty of using the hypers, it's worth doing the
* extra check here every time. It's probably cheaper than doing
* hcmp, anyway. There is a little performance hit resulting from
* resetting all the access times to 0, but it only happens once
* every month or so, and the access times will rapidly sort
* themselves back out after just a few more DReads.
*/
if (timecounter < 0) {
timecounter = 1;
tp = Buffers;
for (i = 0; i < nbuffers; i++, tp++) {
tp->accesstime = 0;
if (!lp && !tp->lockers) /* one is as good as the rest, I guess */
lp = tp;
}
} else {
/* this is the typical case */
tp = Buffers;
for (i = 0; i < nbuffers; i++, tp++) {
if (tp->lockers == 0) {
if (!lp || tp->accesstime < lt) {
lp = tp;
lt = tp->accesstime;
}
}
}
}
if (lp == 0) {
/* No unlocked buffers. If still possible, allocate a new increment */
if (nbuffers + NPB > afs_max_buffers) {
/* There are no unlocked buffers -- this used to panic, but that
* seems extreme. To the best of my knowledge, all the callers
* of DRead are prepared to handle a zero return. Some of them
* just panic directly, but not all of them. */
afs_warn("afs: all buffers locked\n");
return 0;
}
BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB);
osi_Assert(BufferData != NULL);
for (i = 0; i< NPB; i++) {
/* Fill in each buffer with an empty indication. */
tp = &Buffers[i + nbuffers];
tp->fid = NULLIDX;
afs_reset_inode(&tp->inode);
tp->accesstime = 0;
tp->lockers = 0;
tp->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
tp->hashIndex = 0;
tp->dirty = 0;
AFS_RWLOCK_INIT(&tp->lock, "buffer lock");
}
lp = &Buffers[nbuffers];
nbuffers += NPB;
}
if (lp->dirty) {
/* see DFlush for rationale for not getting and locking the dcache */
tfile = afs_CFileOpen(&lp->inode);
afs_CFileWrite(tfile, lp->page * AFS_BUFFER_PAGESIZE, lp->data,
AFS_BUFFER_PAGESIZE);
lp->dirty = 0;
afs_CFileClose(tfile);
AFS_STATS(afs_stats_cmperf.bufFlushDirty++);
}
/* Now fill in the header. */
lp->fid = adc->index;
afs_copy_inode(&lp->inode, &adc->f.inode);
//......... some code omitted here .........
Example 12: afs_mountroot
int
afs_mountroot(void)
{
AFS_STATCNT(afs_mountroot);
return (EINVAL);
}
Example 13: afs_fid
afs_fid(OSI_VC_DECL(avc), struct fid **fidpp)
#endif /* AFS_AIX41_ENV */
{
struct SmallFid Sfid;
long addr[2];
register struct cell *tcell;
extern struct vcache *afs_globalVp;
int SizeOfSmallFid = SIZEOF_SMALLFID;
int rootvp = 0;
OSI_VC_CONVERT(avc);
AFS_STATCNT(afs_fid);
if (afs_shuttingdown)
return EIO;
if (afs_NFSRootOnly && (avc == afs_globalVp))
rootvp = 1;
if (!afs_NFSRootOnly || rootvp
#ifdef AFS_AIX41_ENV
|| USE_SMALLFID(credp)
#endif
) {
tcell = afs_GetCell(avc->f.fid.Cell, READ_LOCK);
Sfid.Volume = avc->f.fid.Fid.Volume;
Sfid.Vnode = avc->f.fid.Fid.Vnode;
Sfid.CellAndUnique =
((tcell->cellIndex << 24) + (avc->f.fid.Fid.Unique & 0xffffff));
afs_PutCell(tcell, READ_LOCK);
if (avc->f.fid.Fid.Vnode > 0xffff)
afs_fid_vnodeoverflow++;
if (avc->f.fid.Fid.Unique > 0xffffff)
afs_fid_uniqueoverflow++;
} else {
#if defined(AFS_SUN57_64BIT_ENV) || (defined(AFS_SGI61_ENV) && (_MIPS_SZPTR == 64))
addr[1] = (long)AFS_XLATOR_MAGIC << 48;
#else /* defined(AFS_SGI61_ENV) && (_MIPS_SZPTR == 64) */
addr[1] = AFS_XLATOR_MAGIC;
SizeOfSmallFid = sizeof(addr);
#endif /* defined(AFS_SGI61_ENV) && (_MIPS_SZPTR == 64) */
addr[0] = (long)avc;
#ifndef AFS_AIX41_ENV
/* No post processing, so don't hold ref count. */
AFS_FAST_HOLD(avc);
#endif
}
#if defined(AFS_AIX_ENV) || defined(AFS_SUN54_ENV)
/* Use the fid pointer passed to us. */
fidpp->fid_len = SizeOfSmallFid;
if (afs_NFSRootOnly) {
if (rootvp
#ifdef AFS_AIX41_ENV
|| USE_SMALLFID(credp)
#endif
) {
memcpy(fidpp->fid_data, (caddr_t) & Sfid, SizeOfSmallFid);
} else {
memcpy(fidpp->fid_data, (caddr_t) addr, SizeOfSmallFid);
}
} else {
memcpy(fidpp->fid_data, (caddr_t) & Sfid, SizeOfSmallFid);
}
#else
/* malloc a fid pointer ourselves. */
*fidpp = (struct fid *)AFS_KALLOC(SizeOfSmallFid + 2);
(*fidpp)->fid_len = SizeOfSmallFid;
if (afs_NFSRootOnly) {
if (rootvp) {
memcpy((*fidpp)->fid_data, (char *)&Sfid, SizeOfSmallFid);
} else {
memcpy((*fidpp)->fid_data, (char *)addr, SizeOfSmallFid);
}
} else {
memcpy((*fidpp)->fid_data, (char *)&Sfid, SizeOfSmallFid);
}
#endif
return (0);
}
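The interesting line in afs_fid is the CellAndUnique packing: the cell index occupies the top byte and the low 24 bits of the vnode uniquifier fill the rest, which is why the function keeps the two overflow counters. A small sketch of the encode/decode arithmetic, for illustration only (the helper names are not OpenAFS functions):
/* Encode/decode of the SmallFid CellAndUnique word built in afs_fid above. */
static afs_uint32
pack_cell_and_unique(afs_uint32 cellIndex, afs_uint32 unique)
{
    return (cellIndex << 24) + (unique & 0xffffff);  /* top byte: cell, low 24 bits: unique */
}
static void
unpack_cell_and_unique(afs_uint32 word, afs_uint32 *cellIndex, afs_uint32 *unique)
{
    *cellIndex = word >> 24;         /* which locally known cell the fid belongs to */
    *unique    = word & 0xffffff;    /* truncated uniquifier; overflow is counted, not prevented */
}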
Example 14: afs_swapvp
int
afs_swapvp(void)
{
AFS_STATCNT(afs_swapvp);
return (EINVAL);
}
Example 15: afs_NewCell
/*!
* Create or update a cell entry.
* \param acellName Name of cell.
* \param acellHosts Array of hosts that this cell has.
* \param aflags Cell flags.
* \param linkedcname
* \param fsport File server port.
* \param vlport Volume server port.
* \param timeout Cell timeout value, 0 means static AFSDB entry.
* \return
*/
afs_int32
afs_NewCell(char *acellName, afs_int32 * acellHosts, int aflags,
char *linkedcname, u_short fsport, u_short vlport, int timeout)
{
struct cell *tc, *tcl = 0;
afs_int32 i, newc = 0, code = 0;
AFS_STATCNT(afs_NewCell);
ObtainWriteLock(&afs_xcell, 103);
tc = afs_FindCellByName_nl(acellName, READ_LOCK);
if (tc) {
aflags &= ~CNoSUID;
} else {
tc = afs_osi_Alloc(sizeof(struct cell));
osi_Assert(tc != NULL);
memset(tc, 0, sizeof(*tc));
tc->cellName = afs_strdup(acellName);
tc->fsport = AFS_FSPORT;
tc->vlport = AFS_VLPORT;
AFS_MD5_String(tc->cellHandle, tc->cellName, strlen(tc->cellName));
AFS_RWLOCK_INIT(&tc->lock, "cell lock");
newc = 1;
aflags |= CNoSUID;
}
ObtainWriteLock(&tc->lock, 688);
/* If the cell we've found has the correct name but no timeout,
* and we're called with a non-zero timeout, bail out: never
* override static configuration entries with AFSDB ones.
* One exception: if the original cell entry had no servers,
* it must get servers from AFSDB.
*/
if (timeout && !tc->timeout && tc->cellHosts[0]) {
code = EEXIST; /* This code is checked for in afs_LookupAFSDB */
goto bad;
}
/* we don't want to keep pinging old vlservers which were down,
* since they don't matter any more. It's easier to do this than
* to remove the server from its various hash tables. */
for (i = 0; i < AFS_MAXCELLHOSTS; i++) {
if (!tc->cellHosts[i])
break;
tc->cellHosts[i]->flags &= ~SRVR_ISDOWN;
tc->cellHosts[i]->flags |= SRVR_ISGONE;
}
if (fsport)
tc->fsport = fsport;
if (vlport)
tc->vlport = vlport;
if (aflags & CLinkedCell) {
if (!linkedcname) {
code = EINVAL;
goto bad;
}
tcl = afs_FindCellByName_nl(linkedcname, READ_LOCK);
if (!tcl) {
code = ENOENT;
goto bad;
}
if (tcl->lcellp) { /* XXX Overwriting if one existed before! XXX */
tcl->lcellp->lcellp = (struct cell *)0;
tcl->lcellp->states &= ~CLinkedCell;
}
tc->lcellp = tcl;
tcl->lcellp = tc;
}
tc->states |= aflags;
tc->timeout = timeout;
memset(tc->cellHosts, 0, sizeof(tc->cellHosts));
for (i = 0; i < AFS_MAXCELLHOSTS; i++) {
/* Get server for each host and link this cell in.*/
struct server *ts;
afs_uint32 temp = acellHosts[i];
if (!temp)
break;
ts = afs_GetServer(&temp, 1, 0, tc->vlport, WRITE_LOCK, NULL, 0);
ts->cell = tc;
ts->flags &= ~SRVR_ISGONE;
/* Set the server as a host of the new cell. */
tc->cellHosts[i] = ts;
afs_PutServer(ts, WRITE_LOCK);
}
afs_SortServers(tc->cellHosts, AFS_MAXCELLHOSTS); /* randomize servers */
//......... some code omitted here .........
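Finally, going only by the prototype and doc comment at the top of this example, a minimal hypothetical call to afs_NewCell might look like the sketch below: a statically configured cell with one VL server, default ports, and no AFSDB timeout. The cell name and address are made up, and the byte-order convention for the host addresses is an assumption not shown in the snippet above.
/* Hypothetical caller of afs_NewCell, based only on the signature shown above. */
static void
add_example_cell_sketch(void)
{
    afs_int32 hosts[AFS_MAXCELLHOSTS];
    afs_int32 code;
    memset(hosts, 0, sizeof(hosts));  /* unused slots stay 0: the host loop stops at the first 0 */
    hosts[0] = 0x01020304;            /* example VL server address; byte order is an assumption */
    code = afs_NewCell("example.org", /* acellName: hypothetical cell */
                       hosts,         /* acellHosts */
                       0,             /* aflags */
                       NULL,          /* linkedcname: not a linked cell */
                       0, 0,          /* fsport / vlport: 0 keeps the defaults */
                       0);            /* timeout: 0 = static (non-AFSDB) entry */
    (void)code;
}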