本文整理汇总了C++中ReleaseWriteLock函数的典型用法代码示例。如果您正苦于以下问题:C++ ReleaseWriteLock函数的具体用法?C++ ReleaseWriteLock怎么用?C++ ReleaseWriteLock使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ReleaseWriteLock函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: afs_NewCellAlias
/*!
 * Create a new cell alias entry and update the dynroot vnode.
 *
 * \param alias  Alias name to add (string is duplicated with afs_strdup).
 * \param cell   Name of the cell the alias maps to (also duplicated).
 * \return 0 on success; EEXIST if a cell or alias of that name already
 *         exists.
 */
afs_int32
afs_NewCellAlias(char *alias, char *cell)
{
    struct cell_alias *tc;

    /* A shared lock is enough for the duplicate check... */
    ObtainSharedLock(&afs_xcell, 681);
    if (afs_CellOrAliasExists_nl(alias)) {
        ReleaseSharedLock(&afs_xcell);
        return EEXIST;
    }

    /* ...upgrade to a write lock only once we know we will insert. */
    UpgradeSToWLock(&afs_xcell, 682);
    tc = afs_osi_Alloc(sizeof(struct cell_alias));
    osi_Assert(tc != NULL);     /* allocation failure is treated as fatal */
    tc->alias = afs_strdup(alias);
    tc->cell = afs_strdup(cell);
    /* Push the new entry onto the head of the global alias list. */
    tc->next = afs_cellalias_head;
    tc->index = afs_cellalias_index++;
    afs_cellalias_head = tc;
    ReleaseWriteLock(&afs_xcell);

    /* Rebuild the dynamic root directory so the new alias becomes
     * visible. */
    afs_DynrootInvalidate();
    return 0;
}
示例2: osi_UFSTruncate
/*
 * osi_UFSTruncate
 *
 * Shrink the UFS file behind 'afile' to 'asize' bytes by issuing
 * VOP_SETATTR with AT_SIZE.  This routine never grows a file.
 *
 * \param afile  Open cache file whose backing vnode is to be resized.
 * \param asize  Desired size in bytes; if the file is already this
 *               small (or the stat fails), nothing is changed.
 * \return 0 on success, or the error code from afs_osi_Stat /
 *         VOP_SETATTR.
 *
 * Fix: removed the local 'oldCred', which was declared but never read
 * or written anywhere in this implementation.
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    struct vattr tvattr;
    afs_int32 code;
    struct osi_stat tstat;

    AFS_STATCNT(osi_Truncate);

    /* This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
        return code;

    ObtainWriteLock(&afs_xosi, 321);
    tvattr.va_mask = AT_SIZE;
    tvattr.va_size = asize;
    /*
     * The only time a flag is used (ATTR_UTIME) is when we're changing the time
     */
    AFS_GUNLOCK();              /* drop the AFS global lock around the VOP call */
#ifdef AFS_SUN510_ENV
    {
        caller_context_t ct;

        code = VOP_SETATTR(afile->vnode, &tvattr, 0, afs_osi_credp, &ct);
    }
#else
    code = VOP_SETATTR(afile->vnode, &tvattr, 0, afs_osi_credp);
#endif
    AFS_GLOCK();
    ReleaseWriteLock(&afs_xosi);
    return code;
}
示例3: afs_osi_Stat
int
afs_osi_Stat(struct osi_file *afile, struct osi_stat *astat)
{
afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
ObtainWriteLock(&afs_xosi, 320);
/* Ufs doesn't seem to care about the flags so we pass 0 for now */
tvattr.va_mask = AT_ALL;
AFS_GUNLOCK();
#ifdef AFS_SUN511_ENV
code = VOP_GETATTR(afile->vnode, &tvattr, 0, afs_osi_credp, NULL);
#else
code = VOP_GETATTR(afile->vnode, &tvattr, 0, afs_osi_credp);
#endif
AFS_GLOCK();
if (code == 0) {
astat->size = tvattr.va_size;
astat->mtime = tvattr.va_mtime.tv_sec;
astat->atime = tvattr.va_atime.tv_sec;
}
ReleaseWriteLock(&afs_xosi);
return code;
}
示例4: afs_FlushCBs
/* afs_FlushCBs
 * To be used only in dire circumstances: this drops all callbacks on
 * the floor, without giving them back to the server.  It's ok, the
 * server can deal with it, but it is a little bit rude.
 *
 * Walks every vcache hash chain, clears callback/hint/CStatd state,
 * removes entries from the callback-sort queue, purges DNLC entries
 * for directories, and finally resets the callback queue.
 *
 * Fix: removed the obsolete 'register' storage-class specifier
 * (deprecated; removed in C23) and braced the outer loop body.
 */
void
afs_FlushCBs(void)
{
    int i;
    struct vcache *tvc;

    ObtainWriteLock(&afs_xcbhash, 86);  /* pretty likely I'm going to remove something */
    for (i = 0; i < VCSIZE; i++) {      /* reset all the vnodes */
        for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
            tvc->callback = 0;
            tvc->dchint = NULL; /* invalidate hints */
            tvc->f.states &= ~(CStatd);
            if (QPrev(&(tvc->callsort)))
                QRemove(&(tvc->callsort));
            /* Purge name-lookup cache entries for directories (odd
             * vnode numbers or VDIR), unless the vcache is still being
             * initialized or already flushed. */
            if (!(tvc->f.states & (CVInit | CVFlushed))
                && ((tvc->f.fid.Fid.Vnode & 1) || (vType(tvc) == VDIR)))
                osi_dnlc_purgedp(tvc);
        }
    }
    afs_InitCBQueue(0);
    ReleaseWriteLock(&afs_xcbhash);
}
示例5: afs_SetupVolume
/**
*
* @param volid Volume ID. If it's 0, get it from the name.
* @param aname Volume name.
* @param ve Volume entry.
* @param tcell The cell containing this volume.
* @param agood
* @param type Type of volume.
* @param areq Request.
* @return Volume or NULL if failure.
*/
static struct volume *
afs_SetupVolume(afs_int32 volid, char *aname, void *ve, struct cell *tcell,
afs_int32 agood, afs_int32 type, struct vrequest *areq)
{
struct volume *tv;
struct vldbentry *ove = (struct vldbentry *)ve;
struct nvldbentry *nve = (struct nvldbentry *)ve;
struct uvldbentry *uve = (struct uvldbentry *)ve;
int whichType; /* which type of volume to look for */
int i, j, err = 0;
if (!volid) {
int len;
/* special hint from file server to use vlserver */
len = strlen(aname);
if (len >= 8 && strcmp(aname + len - 7, ".backup") == 0)
whichType = BACKVOL;
else if (len >= 10 && strcmp(aname + len - 9, ".readonly") == 0)
whichType = ROVOL;
else
whichType = RWVOL;
/* figure out which one we're really interested in (a set is returned) */
volid = afs_vtoi(aname);
if (volid == 0) {
if (type == 2) {
volid = uve->volumeId[whichType];
} else if (type == 1) {
volid = nve->volumeId[whichType];
} else {
volid = ove->volumeId[whichType];
}
} /* end of if (volid == 0) */
} /* end of if (!volid) */
ObtainWriteLock(&afs_xvolume, 108);
i = VHash(volid);
for (tv = afs_volumes[i]; tv; tv = tv->next) {
if (tv->volume == volid && tv->cell == tcell->cellNum) {
break;
}
}
if (!tv) {
struct fvolume *tf = 0;
tv = afs_GetVolSlot();
if (!tv) {
ReleaseWriteLock(&afs_xvolume);
return NULL;
}
memset(tv, 0, sizeof(struct volume));
for (j = fvTable[FVHash(tcell->cellNum, volid)]; j != 0; j = tf->next) {
if (afs_FVIndex != j) {
struct osi_file *tfile;
tfile = osi_UFSOpen(&volumeInode);
err =
afs_osi_Read(tfile, sizeof(struct fvolume) * j,
&staticFVolume, sizeof(struct fvolume));
osi_UFSClose(tfile);
if (err != sizeof(struct fvolume)) {
afs_warn("afs_SetupVolume: error %d reading volumeinfo\n",
(int)err);
/* put tv back on the free list; the data in it is not valid */
tv->next = afs_freeVolList;
afs_freeVolList = tv;
/* staticFVolume contents are not valid */
afs_FVIndex = -1;
ReleaseWriteLock(&afs_xvolume);
return NULL;
}
afs_FVIndex = j;
}
tf = &staticFVolume;
if (tf->cell == tcell->cellNum && tf->volume == volid)
break;
}
tv->cell = tcell->cellNum;
AFS_RWLOCK_INIT(&tv->lock, "volume lock");
tv->next = afs_volumes[i]; /* thread into list */
afs_volumes[i] = tv;
tv->volume = volid;
if (tf && (j != 0)) {
tv->vtix = afs_FVIndex;
tv->mtpoint = tf->mtpoint;
//.........这里部分代码省略.........
示例6: afs_TransitionToBypass
/*
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static.  We are about to change a file from
 * normal caching to bypassing its cache.  Therefore, we want to
 * free up any cache space in use by the file, and throw out any
 * existing VM pages for the file.  We keep track of the number of
 * times we go back and forth from caching to bypass.
 *
 * \param avc     vcache to transition; silently ignored if NULL.
 * \param acred   credentials used for the store-back and cache flush.
 * \param aflags  TRANSChangeDesiredBit and/or TRANSSetManualBit.
 */
void
afs_TransitionToBypass(struct vcache *avc,
                       afs_ucred_t *acred, int aflags)
{
    afs_int32 code;
    struct vrequest treq;
    int setDesire = 0;
    int setManual = 0;

    if (!avc)
        return;

    if (aflags & TRANSChangeDesiredBit)
        setDesire = 1;
    if (aflags & TRANSSetManualBit)
        setManual = 1;

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);  /* Since afs_TryToSmush will do a pvn_vptrunc */
#else
    AFS_GLOCK();
#endif
    ObtainWriteLock(&avc->lock, 925);
    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier.  No cause to panic, just return.
     */
    if (avc->cachingStates & FCSBypass)
        goto done;

    /* If we never cached this, just change state */
    if (setDesire && (!(avc->cachingStates & FCSBypass))) {
        avc->cachingStates |= FCSBypass;
        goto done;
    }

    /* cg2v, try to store any chunks not written 20071204 */
    /* Best effort: 'code' is checked between the two calls but the
     * transition below proceeds regardless of the store result. */
    if (avc->execsOrWriters > 0) {
        code = afs_InitReq(&treq, acred);
        if (!code)
            code = afs_StoreAllSegments(avc, &treq, AFS_SYNC | AFS_LASTSTORE);
    }

#if 0
    /* also cg2v, don't dequeue the callback */
    ObtainWriteLock(&afs_xcbhash, 956);
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
#endif
    avc->f.states &= ~(CStatd | CDirty);        /* next reference will re-stat */
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);      /* drop name-lookup cache entries for this vnode */
    /* Free any cached symlink target, unless CCore is set. */
    if (avc->linkData && !(avc->f.states & CCore)) {
        afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
        avc->linkData = NULL;
    }

    avc->cachingStates |= FCSBypass;    /* Set the bypass flag */
    if (setDesire)
        avc->cachingStates |= FCSDesireBypass;
    if (setManual)
        avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;

  done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}
示例7: afs_InvalidateAllSegments
int
afs_InvalidateAllSegments(struct vcache *avc)
{
struct dcache *tdc;
afs_int32 hash;
afs_int32 index;
struct dcache **dcList;
int i, dcListMax, dcListCount;
AFS_STATCNT(afs_InvalidateAllSegments);
afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
hash = DVHash(&avc->f.fid);
avc->f.truncPos = AFS_NOTRUNC; /* don't truncate later */
avc->f.states &= ~CExtendedFile; /* not any more */
ObtainWriteLock(&afs_xcbhash, 459);
afs_DequeueCallback(avc);
avc->f.states &= ~(CStatd | CDirty); /* mark status information as bad, too */
ReleaseWriteLock(&afs_xcbhash);
if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
osi_dnlc_purgedp(avc);
/* Blow away pages; for now, only for Solaris */
#if (defined(AFS_SUN5_ENV))
if (WriteLocked(&avc->lock))
osi_ReleaseVM(avc, (afs_ucred_t *)0);
#endif
/*
* Block out others from screwing with this table; is a read lock
* sufficient?
*/
ObtainWriteLock(&afs_xdcache, 286);
dcListMax = 0;
for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetValidDSlot(index);
if (!tdc) {
/* In the case of fatal errors during stores, we MUST
* invalidate all of the relevant chunks. Otherwise, the chunks
* will be left with the 'new' data that was never successfully
* written to the server, but the DV in the dcache is still the
* old DV. So, we may indefinitely serve data to applications
* that is not actually in the file on the fileserver. If we
* cannot afs_GetValidDSlot the appropriate entries, currently
* there is no way to ensure the dcache is invalidated. So for
* now, to avoid risking serving bad data from the cache, panic
* instead. */
osi_Panic("afs_InvalidateAllSegments tdc count");
}
ReleaseReadLock(&tdc->tlock);
if (!FidCmp(&tdc->f.fid, &avc->f.fid))
dcListMax++;
afs_PutDCache(tdc);
}
index = afs_dvnextTbl[index];
}
dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
dcListCount = 0;
for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetValidDSlot(index);
if (!tdc) {
/* We cannot proceed after getting this error; we risk serving
* incorrect data to applications. So panic instead. See the
* above comment next to the previous afs_GetValidDSlot call
* for details. */
osi_Panic("afs_InvalidateAllSegments tdc store");
}
ReleaseReadLock(&tdc->tlock);
if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
/* same file? we'll zap it */
if (afs_indexFlags[index] & IFDataMod) {
afs_stats_cmperf.cacheCurrDirtyChunks--;
/* don't write it back */
afs_indexFlags[index] &= ~IFDataMod;
}
afs_indexFlags[index] &= ~IFAnyPages;
if (dcListCount < dcListMax)
dcList[dcListCount++] = tdc;
else
afs_PutDCache(tdc);
} else {
afs_PutDCache(tdc);
}
}
index = afs_dvnextTbl[index];
}
ReleaseWriteLock(&afs_xdcache);
for (i = 0; i < dcListCount; i++) {
tdc = dcList[i];
ObtainWriteLock(&tdc->lock, 679);
ZapDCE(tdc);
if (vType(avc) == VDIR)
DZap(tdc);
ReleaseWriteLock(&tdc->lock);
afs_PutDCache(tdc);
//.........这里部分代码省略.........
示例8: unlock_cmdLine
/* Release the write lock on the shared command-line state
 * (cmdLineLock); presumably paired with a matching lock_cmdLine()
 * elsewhere — not visible in this file. */
void
unlock_cmdLine(void)
{
    ReleaseWriteLock(&cmdLineLock);
}
示例9: afs_MemRead
//.........这里部分代码省略.........
ObtainReadLock(&tdc->lock);
/* now, first try to start transfer, if we'll need the data. If
* data already coming, we don't need to do this, obviously. Type
* 2 requests never return a null dcache entry, btw.
*/
if (!(tdc->dflags & DFFetching)
&& !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
/* have cache entry, it is not coming in now,
* and we'll need new data */
tagain:
#ifdef STRUCT_TASK_STRUCT_HAS_CRED
if (trybusy && (!afs_BBusy() || (afs_protocols & VICEP_ACCESS))) {
#else
if (trybusy && !afs_BBusy()) {
#endif
struct brequest *bp;
/* daemon is not busy */
ObtainSharedLock(&tdc->mflock, 665);
if (!(tdc->mflags & DFFetchReq)) {
int dontwait = B_DONTWAIT;
/* start the daemon (may already be running, however) */
UpgradeSToWLock(&tdc->mflock, 666);
tdc->mflags |= DFFetchReq;
#ifdef STRUCT_TASK_STRUCT_HAS_CRED
if (afs_protocols & VICEP_ACCESS)
dontwait = 0;
#endif
bp = afs_BQueue(BOP_FETCH, avc, dontwait, 0, acred,
(afs_size_t) filePos, (afs_size_t) 0,
tdc, (void *)0, (void *)0);
if (!bp) {
tdc->mflags &= ~DFFetchReq;
trybusy = 0; /* Avoid bkg daemon since they're too busy */
ReleaseWriteLock(&tdc->mflock);
goto tagain;
}
ConvertWToSLock(&tdc->mflock);
/* don't use bp pointer! */
}
code = 0;
ConvertSToRLock(&tdc->mflock);
while (!code && tdc->mflags & DFFetchReq) {
afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
ICL_TYPE_STRING, __FILE__, ICL_TYPE_INT32,
__LINE__, ICL_TYPE_POINTER, tdc,
ICL_TYPE_INT32, tdc->dflags);
/* don't need waiting flag on this one */
ReleaseReadLock(&tdc->mflock);
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&avc->lock);
code = afs_osi_SleepSig(&tdc->validPos);
ObtainReadLock(&avc->lock);
ObtainReadLock(&tdc->lock);
ObtainReadLock(&tdc->mflock);
}
ReleaseReadLock(&tdc->mflock);
if (code) {
error = code;
break;
}
}
}
/* now data may have started flowing in (if DFFetching is on). If
* data is now streaming in, then wait for some interesting stuff.
*/
code = 0;
示例10: afs_RebuildDynroot
//.........这里部分代码省略.........
afs_osi_Free(dotCell, dotLen);
afs_PutCellAlias(ca);
}
maxaliasidx = aliasidx;
ObtainReadLock(&afs_dynSymlinkLock);
ts = afs_dynSymlinkBase;
while (ts) {
afs_dynroot_computeDirEnt(ts->name, &curPage, &curChunk);
ts = ts->next;
}
dirSize = (curPage + 1) * AFS_PAGESIZE;
newDir = afs_osi_Alloc(dirSize);
/*
* Now actually construct the directory.
*/
curChunk = 13;
curPage = 0;
dirHeader = (struct DirHeader *)newDir;
dirHeader->header.pgcount = 0;
dirHeader->header.tag = htons(1234);
dirHeader->header.freecount = 0;
dirHeader->header.freebitmap[0] = 0xff;
dirHeader->header.freebitmap[1] = 0x1f;
for (i = 2; i < EPP / 8; i++)
dirHeader->header.freebitmap[i] = 0;
dirHeader->alloMap[0] = EPP - DHE - 1;
for (i = 1; i < MAXPAGES; i++)
dirHeader->alloMap[i] = EPP;
for (i = 0; i < NHASHENT; i++)
dirHeader->hashTable[i] = 0;
/* Install ".", "..", and the dynamic mount directory */
afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, ".", 1);
afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, "..", 1);
afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk,
AFS_DYNROOT_MOUNTNAME, AFS_DYNROOT_MOUNT_VNODE);
linkCount += 3;
for (cellidx = 0; cellidx < maxcellidx; cellidx++) {
c = afs_GetCellByIndex(cellidx, READ_LOCK);
if (!c)
continue;
if (c->cellNum == afs_dynrootCell)
continue;
dotLen = strlen(c->cellName) + 2;
dotCell = afs_osi_Alloc(dotLen);
strcpy(dotCell, ".");
afs_strcat(dotCell, c->cellName);
afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, c->cellName,
VNUM_FROM_CIDX_RW(cellidx, 0));
afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, dotCell,
VNUM_FROM_CIDX_RW(cellidx, 1));
afs_osi_Free(dotCell, dotLen);
linkCount += 2;
afs_PutCell(c, READ_LOCK);
}
for (aliasidx = 0; aliasidx < maxaliasidx; aliasidx++) {
ca = afs_GetCellAlias(aliasidx);
if (!ca)
continue;
dotLen = strlen(ca->alias) + 2;
dotCell = afs_osi_Alloc(dotLen);
strcpy(dotCell, ".");
afs_strcat(dotCell, ca->alias);
afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, ca->alias,
VNUM_FROM_CAIDX_RW(aliasidx, 0));
afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, dotCell,
VNUM_FROM_CAIDX_RW(aliasidx, 1));
afs_osi_Free(dotCell, dotLen);
afs_PutCellAlias(ca);
}
ts = afs_dynSymlinkBase;
while (ts) {
int vnum = VNUM_FROM_TYPEID(VN_TYPE_SYMLINK, ts->index);
afs_dynroot_addDirEnt(dirHeader, &curPage, &curChunk, ts->name, vnum);
ts = ts->next;
}
ReleaseReadLock(&afs_dynSymlinkLock);
ObtainWriteLock(&afs_dynrootDirLock, 549);
if (afs_dynrootDir)
afs_osi_Free(afs_dynrootDir, afs_dynrootDirLen);
afs_dynrootDir = newDir;
afs_dynrootDirLen = dirSize;
afs_dynrootDirLinkcnt = linkCount;
afs_dynrootDirVersion = newVersion;
ReleaseWriteLock(&afs_dynrootDirLock);
}
示例11: osi_dnlc_enter
/*
 * Insert a name-lookup cache (DNLC) entry mapping (adp, aname) -> avc,
 * but only while the caller's directory data version (*avno) still
 * matches adp's current version.  Duplicate entries for the same
 * (directory, name) pair are updated in place rather than re-inserted.
 *
 * \param adp    Directory vcache the name lives in.
 * \param aname  NUL-terminated entry name; ignored if too long to cache.
 * \param avc    Target vcache the name resolves to.
 * \param avno   Directory data version the caller looked the name up in.
 * \return always 0 (the DNLC is purely an optimization).
 */
int
osi_dnlc_enter(struct vcache *adp, char *aname, struct vcache *avc,
               afs_hyper_t * avno)
{
    struct nc *tnc;
    unsigned int key, skey;
    char *ts = aname;
    int safety;

    if (!afs_usednlc)
        return 0;

    TRACE(osi_dnlc_enterT, 0);
    dnlcHash(ts, key);          /* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE) {
        /* name too long to fit in a cache entry */
        return 0;
    }
    skey = key & (NHSIZE - 1);  /* bucket index within nameHash */
    dnlcstats.enters++;

  retry:
    ObtainWriteLock(&afs_xdnlc, 222);

    /* Only cache entries from the latest version of the directory */
    if (!(adp->f.states & CStatd) || !hsame(*avno, adp->f.m.DataVersion)) {
        ReleaseWriteLock(&afs_xdnlc);
        return 0;
    }

    /*
     * Make sure each directory entry gets cached no more than once.
     * The chain is circular: the scan stops when it wraps back to the
     * bucket head.
     */
    for (tnc = nameHash[skey], safety = 0; tnc; tnc = tnc->next, safety++) {
        if ((tnc->dirp == adp) && (!strcmp((char *)tnc->name, aname))) {
            /* duplicate entry */
            break;
        } else if (tnc->next == nameHash[skey]) {       /* end of list */
            tnc = NULL;
            break;
        } else if (safety > NCSIZE) {
            /* Walked more entries than the cache holds: the chain is
             * corrupt.  Purge the whole DNLC and start over. */
            afs_warn("DNLC cycle");
            dnlcstats.cycles++;
            ReleaseWriteLock(&afs_xdnlc);
            osi_dnlc_purge();
            goto retry;
        }
    }

    if (tnc == NULL) {
        /* No existing entry: grab a slot and fill it in. */
        tnc = GetMeAnEntry();
        tnc->dirp = adp;
        tnc->vp = avc;
        tnc->key = key;
        memcpy((char *)tnc->name, aname, ts - aname + 1);       /* include the NULL */
        InsertEntry(tnc);
    } else {
        /* duplicate */
        tnc->vp = avc;
    }

    ReleaseWriteLock(&afs_xdnlc);
    return 0;
}
示例12: DRead
int
DRead(struct dcache *adc, int page, struct DirBuffer *entry)
{
/* Read a page from the disk. */
struct buffer *tb, *tb2;
struct osi_file *tfile;
int code;
AFS_STATCNT(DRead);
memset(entry, 0, sizeof(struct DirBuffer));
ObtainWriteLock(&afs_bufferLock, 256);
#define bufmatch(tb) (tb->page == page && tb->fid == adc->index)
#define buf_Front(head,parent,p) {(parent)->hashNext = (p)->hashNext; (p)->hashNext= *(head);*(head)=(p);}
/* this apparently-complicated-looking code is simply an example of
* a little bit of loop unrolling, and is a standard linked-list
* traversal trick. It saves a few assignments at the the expense
* of larger code size. This could be simplified by better use of
* macros.
*/
if ((tb = phTable[pHash(adc->index, page)])) {
if (bufmatch(tb)) {
ObtainWriteLock(&tb->lock, 257);
tb->lockers++;
ReleaseWriteLock(&afs_bufferLock);
tb->accesstime = timecounter++;
AFS_STATS(afs_stats_cmperf.bufHits++);
ReleaseWriteLock(&tb->lock);
entry->buffer = tb;
entry->data = tb->data;
return 0;
} else {
struct buffer **bufhead;
bufhead = &(phTable[pHash(adc->index, page)]);
while ((tb2 = tb->hashNext)) {
if (bufmatch(tb2)) {
buf_Front(bufhead, tb, tb2);
ObtainWriteLock(&tb2->lock, 258);
tb2->lockers++;
ReleaseWriteLock(&afs_bufferLock);
tb2->accesstime = timecounter++;
AFS_STATS(afs_stats_cmperf.bufHits++);
ReleaseWriteLock(&tb2->lock);
entry->buffer = tb2;
entry->data = tb2->data;
return 0;
}
if ((tb = tb2->hashNext)) {
if (bufmatch(tb)) {
buf_Front(bufhead, tb2, tb);
ObtainWriteLock(&tb->lock, 259);
tb->lockers++;
ReleaseWriteLock(&afs_bufferLock);
tb->accesstime = timecounter++;
AFS_STATS(afs_stats_cmperf.bufHits++);
ReleaseWriteLock(&tb->lock);
entry->buffer = tb;
entry->data = tb->data;
return 0;
}
} else
break;
}
}
} else
tb2 = NULL;
AFS_STATS(afs_stats_cmperf.bufMisses++);
/* can't find it */
/* The last thing we looked at was either tb or tb2 (or nothing). That
* is at least the oldest buffer on one particular hash chain, so it's
* a pretty good place to start looking for the truly oldest buffer.
*/
tb = afs_newslot(adc, page, (tb ? tb : tb2));
if (!tb) {
ReleaseWriteLock(&afs_bufferLock);
return EIO;
}
ObtainWriteLock(&tb->lock, 260);
tb->lockers++;
ReleaseWriteLock(&afs_bufferLock);
if (page * AFS_BUFFER_PAGESIZE >= adc->f.chunkBytes) {
tb->fid = NULLIDX;
afs_reset_inode(&tb->inode);
tb->lockers--;
ReleaseWriteLock(&tb->lock);
return EIO;
}
tfile = afs_CFileOpen(&adc->f.inode);
code =
afs_CFileRead(tfile, tb->page * AFS_BUFFER_PAGESIZE, tb->data,
AFS_BUFFER_PAGESIZE);
afs_CFileClose(tfile);
if (code < AFS_BUFFER_PAGESIZE) {
tb->fid = NULLIDX;
afs_reset_inode(&tb->inode);
tb->lockers--;
//.........这里部分代码省略.........
示例13: afs_ProcessOpCreate
//.........这里部分代码省略.........
}
if (vType(avc) == VDIR) {
/* Change fid in the dir for the "." entry. ".." has alredy been
* handled by afs_FixChildrenFids when processing the parent dir.
*/
tdc = afs_FindDCacheByFid(&avc->f.fid);
if (tdc) {
afs_dir_ChangeFid(tdc, ".", &avc->f.fid.Fid.Vnode,
&newFid.Fid.Vnode);
if (avc->f.m.LinkCount >= 2)
/* For non empty dirs, fix children's parentVnode and
* parentUnique reference.
*/
afs_FixChildrenFids(&avc->f.fid, &newFid);
afs_PutDCache(tdc);
}
}
/* Recompute hash chain positions for vnode and dcaches.
* Then change to the new FID.
*/
/* The vcache goes first. */
ObtainWriteLock(&afs_xvcache, 735);
/* Old fid hash. */
hash = VCHash(&avc->f.fid);
/* New fid hash. */
new_hash = VCHash(&newFid);
/* Remove hash from old position. */
/* XXX: not checking array element contents. It shouldn't be empty.
* If it oopses, then something else might be wrong.
*/
if (afs_vhashT[hash] == avc) {
/* First in hash chain (might be the only one). */
afs_vhashT[hash] = avc->hnext;
} else {
/* More elements in hash chain. */
for (tvc = afs_vhashT[hash]; tvc; tvc = tvc->hnext) {
if (tvc->hnext == avc) {
tvc->hnext = avc->hnext;
break;
}
}
} /* if (!afs_vhashT[i]->hnext) */
QRemove(&avc->vhashq);
/* Insert hash in new position. */
avc->hnext = afs_vhashT[new_hash];
afs_vhashT[new_hash] = avc;
QAdd(&afs_vhashTV[VCHashV(&newFid)], &avc->vhashq);
ReleaseWriteLock(&afs_xvcache);
/* Do the same thing for all dcaches. */
hash = DVHash(&avc->f.fid);
ObtainWriteLock(&afs_xdcache, 743);
for (index = afs_dvhashTbl[hash]; index != NULLIDX; index = hash) {
hash = afs_dvnextTbl[index];
tdc = afs_GetValidDSlot(index);
ReleaseReadLock(&tdc->tlock);
if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
/* Safer but slower. */
afs_HashOutDCache(tdc, 0);
/* Put dcache in new positions in the dchash and dvhash. */
new_hash = DCHash(&newFid, tdc->f.chunk);
afs_dcnextTbl[tdc->index] = afs_dchashTbl[new_hash];
afs_dchashTbl[new_hash] = tdc->index;
new_hash = DVHash(&newFid);
afs_dvnextTbl[tdc->index] = afs_dvhashTbl[new_hash];
afs_dvhashTbl[new_hash] = tdc->index;
afs_indexUnique[tdc->index] = newFid.Fid.Unique;
memcpy(&tdc->f.fid, &newFid, sizeof(struct VenusFid));
} /* if fid match */
} /* if uniquifier match */
if (tdc)
afs_PutDCache(tdc);
} /* for all dcaches in this hash bucket */
ReleaseWriteLock(&afs_xdcache);
/* Now we can set the new fid. */
memcpy(&avc->f.fid, &newFid, sizeof(struct VenusFid));
end:
if (tdp)
afs_PutVCache(tdp);
afs_osi_Free(tname, AFSNAMEMAX);
if (ttargetName)
afs_osi_Free(ttargetName, tlen);
return code;
}
示例14: afs_ResyncDisconFiles
/*!
* All files that have been dirty before disconnection are going to
* be replayed back to the server.
*
* \param areq Request from the user.
* \param acred User credentials.
*
* \return If all files synchronized succesfully, return 0, otherwise
* return error code
*
* \note For now, it's the request from the PDiscon pioctl.
*
*/
int
afs_ResyncDisconFiles(struct vrequest *areq, afs_ucred_t *acred)
{
struct afs_conn *tc;
struct rx_connection *rxconn;
struct vcache *tvc;
struct AFSFetchStatus fstat;
struct AFSCallBack callback;
struct AFSVolSync tsync;
int code = 0;
afs_int32 start = 0;
XSTATS_DECLS;
/*AFS_STATCNT(afs_ResyncDisconFiles);*/
ObtainWriteLock(&afs_disconDirtyLock, 707);
while (!QEmpty(&afs_disconDirty)) {
tvc = QEntry(QPrev(&afs_disconDirty), struct vcache, dirtyq);
/* Can't lock tvc whilst holding the discon dirty lock */
ReleaseWriteLock(&afs_disconDirtyLock);
/* Get local write lock. */
ObtainWriteLock(&tvc->lock, 705);
if (tvc->f.ddirty_flags & VDisconRemove) {
/* Delete the file on the server and just move on
* to the next file. After all, it has been deleted
* we can't replay any other operation it.
*/
code = afs_ProcessOpRemove(tvc, areq);
goto next_file;
} else if (tvc->f.ddirty_flags & VDisconCreate) {
/* For newly created files, we don't need a server lock. */
code = afs_ProcessOpCreate(tvc, areq, acred);
if (code)
goto next_file;
tvc->f.ddirty_flags &= ~VDisconCreate;
tvc->f.ddirty_flags |= VDisconCreated;
}
#if 0
/* Get server write lock. */
do {
tc = afs_Conn(&tvc->f.fid, areq, SHARED_LOCK, &rxconn);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
RX_AFS_GUNLOCK();
code = RXAFS_SetLock(rxconn,
(struct AFSFid *)&tvc->f.fid.Fid,
LockWrite,
&tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze(tc,
rxconn,
code,
&tvc->f.fid,
areq,
AFS_STATS_FS_RPCIDX_SETLOCK,
SHARED_LOCK,
NULL));
if (code)
goto next_file;
#endif
if (tvc->f.ddirty_flags & VDisconRename) {
/* If we're renaming the file, do so now */
code = afs_ProcessOpRename(tvc, areq);
if (code)
goto unlock_srv_file;
}
/* Issue a FetchStatus to get info about DV and callbacks. */
do {
tc = afs_Conn(&tvc->f.fid, areq, SHARED_LOCK, &rxconn);
if (tc) {
tvc->callback = tc->srvr->server;
start = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
RX_AFS_GUNLOCK();
code = RXAFS_FetchStatus(rxconn,
(struct AFSFid *)&tvc->f.fid.Fid,
//.........这里部分代码省略.........
示例15: afsremove
/*
 * Remove 'aname' from directory 'adp', both on the fileserver (unless
 * disconnected) and in the local directory cache, then decrement the
 * link count on the removed file's vcache and possibly flush it.
 *
 * Lock/reference contract (implied by the releases below — confirm
 * against callers): the caller holds adp->lock write-locked, tdc->lock
 * shared-locked (if tdc != NULL), and references on tdc and tvc.  This
 * function releases all of them on every path.
 *
 * \param adp    Directory vcache containing the entry (write-locked).
 * \param tdc    dcache for the directory, or NULL.
 * \param tvc    vcache of the file being removed, or NULL.
 * \param aname  Entry name to remove.
 * \param acred  Caller credentials.
 * \param treqp  Request structure for server communication.
 * \return the (possibly translated) RPC error, or 0 — note the final
 *         return is 0 even if the local afs_dir_Delete failed (the
 *         dcache entry is zapped instead).
 */
int
afsremove(register struct vcache *adp, register struct dcache *tdc,
          register struct vcache *tvc, char *aname, afs_ucred_t *acred,
          struct vrequest *treqp)
{
    register afs_int32 code = 0;
    register struct afs_conn *tc;
    struct AFSFetchStatus OutDirStatus;
    struct AFSVolSync tsync;
    XSTATS_DECLS;
    /* Skip the RPC entirely when running disconnected. */
    if (!AFS_IS_DISCONNECTED) {
        do {
            tc = afs_Conn(&adp->f.fid, treqp, SHARED_LOCK);
            if (tc) {
                XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_REMOVEFILE);
                RX_AFS_GUNLOCK();
                code =
                    RXAFS_RemoveFile(tc->id, (struct AFSFid *)&adp->f.fid.Fid,
                                     aname, &OutDirStatus, &tsync);
                RX_AFS_GLOCK();
                XSTATS_END_TIME;
            } else
                code = -1;      /* no usable connection */
        } while (afs_Analyze
                 (tc, code, &adp->f.fid, treqp, AFS_STATS_FS_RPCIDX_REMOVEFILE,
                  SHARED_LOCK, NULL));
    }
    /* Drop the name from the DNLC whether or not the RPC succeeded. */
    osi_dnlc_remove(adp, aname, tvc);
    if (code) {
        /* Error path: release everything the caller handed us. */
        if (tdc) {
            ReleaseSharedLock(&tdc->lock);
            afs_PutDCache(tdc);
        }
        if (tvc)
            afs_PutVCache(tvc);
        if (code < 0) {
            /* Network-level failure: invalidate the directory status. */
            ObtainWriteLock(&afs_xcbhash, 497);
            afs_DequeueCallback(adp);
            adp->f.states &= ~CStatd;
            ReleaseWriteLock(&afs_xcbhash);
            osi_dnlc_purgedp(adp);
        }
        ReleaseWriteLock(&adp->lock);
        code = afs_CheckCode(code, treqp, 21);
        return code;
    }
    if (tdc)
        UpgradeSToWLock(&tdc->lock, 637);
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
        /* we can do it locally */
        code = afs_dir_Delete(tdc, aname);
        if (code) {
            ZapDCE(tdc);        /* surprise error -- invalid value */
            DZap(tdc);
        }
    }
    if (tdc) {
        ReleaseWriteLock(&tdc->lock);
        afs_PutDCache(tdc);     /* drop ref count */
    }
    ReleaseWriteLock(&adp->lock);
    /* now, get vnode for unlinked dude, and see if we should force it
     * from cache.  adp is now the deleted files vnode.  Note that we
     * call FindVCache instead of GetVCache since if the file's really
     * gone, we won't be able to fetch the status info anyway.  */
    if (tvc) {
        afs_MarinerLog("store$Removing", tvc);
#ifdef AFS_BOZONLOCK_ENV
        afs_BozonLock(&tvc->pvnLock, tvc);
        /* Since afs_TryToSmush will do a pvn_vptrunc */
#endif
        ObtainWriteLock(&tvc->lock, 141);
        /* note that callback will be broken on the deleted file if there are
         * still >0 links left to it, so we'll get the stat right */
        tvc->f.m.LinkCount--;
        tvc->f.states &= ~CUnique;      /* For the dfs xlator */
        if (tvc->f.m.LinkCount == 0 && !osi_Active(tvc)) {
            /* Last link gone and nobody has it open: flush its cache. */
            if (!AFS_NFSXLATORREQ(acred))
                afs_TryToSmush(tvc, acred, 0);
        }
        ReleaseWriteLock(&tvc->lock);
#ifdef AFS_BOZONLOCK_ENV
        afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
        afs_PutVCache(tvc);
    }
    return (0);
}