Python filetool.directory function code examples

This article collects typical usage examples of the misc.filetool.directory function in Python. If you are wondering what the directory function does, how to call it, or what real-world uses look like, the hand-picked code examples below may help.


The following shows 15 code examples of the directory function, sorted by popularity by default.
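
Before turning to the examples, here is a minimal sketch of the pattern they all share: filetool.directory(path) is called to make sure a target directory exists before files are written or copied into it, and, judging from the repeated calls on paths that may already exist, it tolerates existing directories (roughly comparable to os.makedirs with an existence check). The helper name copy_into below is hypothetical and only illustrates the calling pattern.

import os
import shutil

from misc import filetool

def copy_into(sourcepath, targetpath):
    # Hypothetical helper: ensure the target directory exists, then copy the
    # file into it; the same ensure-then-write pattern is used by copyResource()
    # and the Cache read()/write() methods in the examples below.
    filetool.directory(os.path.dirname(targetpath))
    shutil.copy(sourcepath, targetpath)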

Example 1: _handleResources

def _handleResources(script, generator, filtered=True):

    def createResourceInfo(res, resval):
        resinfo = [ { "target": "resource", "data": { res : resval }} ]
        #filetool.save(approot+"/data/resource/" + res + ".json", json.dumpsCode(resinfo))
        return resinfo

    def copyResource(res, library):
        sourcepath = os.path.join(library._resourcePath, res)
        targetpath = approot + "/resource/" + res
        filetool.directory(os.path.dirname(targetpath))
        shutil.copy(sourcepath, targetpath)
        return

    # ----------------------------------------------------------------------
    context.console.info("Processing resources: ", False)
    approot = context.jobconf.get("provider/app-root", "./provider")
    filetool.directory(approot+"/data")
    filetool.directory(approot+"/resource")
    
    # quick copy of runLogResources, for fast results
    packages   = script.packagesSorted()
    parts      = script.parts
    variants   = script.variants

    allresources = {}
    if filtered:
        # -- the next call is fake, just to populate package.data.resources!
        _ = generator._codeGenerator.generateResourceInfoCode(script, generator._settings, context.jobconf.get("library",[]))
        for packageId, package in enumerate(packages):
            allresources.update(package.data.resources)
    else:
        # get the main library
        mainlib = [x for x in script.libraries if x.namespace == script.namespace][0]
        reslist = mainlib.getResources()
        allresources = ResourceHandler.createResourceStruct(reslist, updateOnlyExistingSprites = False)

    # get resource info
    resinfos = {}
    numResources = len(allresources)
    for num,res in enumerate(allresources):
        context.console.progress(num+1, numResources)
        # fake a classId-like resourceId ("a.b.c"), for filter matching
        resId = os.path.splitext(res)[0]
        resId = resId.replace("/", ".")
        if filtered and not passesOutputfilter(resId):
            continue
        resinfos[res] = createResourceInfo(res, allresources[res])
        # extract library name space
        if isinstance(allresources[res], types.ListType): # it's an image = [14, 14, u'png', u'qx' [, u'qx/decoration/Modern/checkradio-combined.png', 0, 0]]
            library_ns = allresources[res][3]
        else: # html page etc. = "qx"
            library_ns = allresources[res]
        if library_ns:  # library_ns == '' means embedded image -> no copying
            library    = libraries[library_ns]
            copyResource(res, library)

    filetool.save(approot+"/data/resource/resources.json", json.dumpsCode(resinfos))

    return
Developer: dominikg, Project: qooxdoo, Lines: 60, Source: CodeProvider.py

Example 2: archive_download

    def archive_download(self, url, cache_path, checksum):
        rc = 0
        # Download
        arcfile = os.path.join(cache_path, os.path.basename(url))
        tdir = os.path.dirname(arcfile)
        filetool.directory(tdir)
        tfp = open(arcfile, "wb")
        #(fname, urlinfo) = urllib.urlretrieve(url, arcfile)
        urlobj = urllib.urlopen(url)
        assert urlobj.getcode() == 200, "Could not download the contrib archive: %s" % url
        hashobj = self.copy_and_hash(urlobj.fp, tfp)
        assert hashobj.hexdigest()==checksum, "Checksum of archive does not validate (should be: %s): %s" % (checksum, arcfile)
        urlobj.close()
        tfp.close()

        # Extract
        if url.endswith('.zip'):
            zipf = ZipFile(arcfile, 'r')
            zipf.extractall(tdir)
            zipf.close()
        else: # .tar, .tgz, .tar.gz, .tar.bz2
            tar = tarfile.open(arcfile)
            tar.extractall(tdir)
            tar.close()

        # Eliminate archive top-dir
        _, archive_dirs, _ = os.walk(tdir).next()
        assert archive_dirs, "The downloaded archive is not in single top-dir format: %s" % arcfile
        archive_top = os.path.join(tdir, archive_dirs[0]) # just take the first dir entry
        for item in os.listdir(archive_top):
            shutil.move(os.path.join(archive_top, item), tdir)
        os.rmdir(archive_top)
        os.unlink(arcfile)

        return rc
Developer: VitalHealthSoftware, Project: qooxdoo, Lines: 35, Source: ContribLoader.py

Example 3: write

    def write(self, cacheId, content, memory=False, writeToFile=True, keepLock=False):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeCond(cacheId):
            print "\nWriting %s ..." % (cacheId,),
        if writeToFile:
            try:
                if not cacheFile in self._locked_files:
                    self._locked_files.add(cacheFile)  # this is not atomic with the next one!
                    filetool.lock(cacheFile)

                fobj = open(cacheFile, 'wb')

                pickle.dump(content, fobj, 2)

                fobj.close()
                if not keepLock:
                    filetool.unlock(cacheFile)
                    self._locked_files.remove(cacheFile)  # not atomic with the previous one!

                #print "wrote cacheId: %s" % cacheId
                if writeCond(cacheId):
                    print "to disk"

            except (IOError, EOFError, pickle.PickleError, pickle.PicklingError), e:
                e.args = ("Could not store cache to %s\n" % self._path + e.args[0], ) + e.args[1:]
                raise e
Developer: unify, Project: qooxdoo, Lines: 28, Source: Cache.py

Example 4: read

    def read(self, cacheId, dependsOn=None, memory=False):
        if memcache.has_key(cacheId):
            return memcache[cacheId]

        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None

        # Out of date check
        if dependsOn:
            fileModTime = os.stat(dependsOn).st_mtime
            if fileModTime > cacheModTime:
                return None

        try:
            content = cPickle.load(open(cacheFile, 'rb'))

            if memory:
                memcache[cacheId] = content

            return content

        except (IOError, EOFError, cPickle.PickleError, cPickle.UnpicklingError):
            self._console.error("Could not read cache from %s" % self._path)
            return None
Developer: AbhishekJain1, Project: testing-spagobi, Lines: 29, Source: Cache.py

Example 5: _check_path

    def _check_path(self, path):
        if not os.path.exists(path):
            filetool.directory(path)
        elif not os.path.isdir(path):
            raise RuntimeError, "The cache path is not a directory: %s" % path
        else: # it's an existing directory
            # defer read/write access to the first call of read()/write()
            pass
Developer: mikegr, Project: lectorious-grails-qooxdoo, Lines: 8, Source: Cache.py

Example 6: read

    def read(self, cacheId, dependsOn=None, memory=False, keepLock=False):
        if dependsOn:
            dependsModTime = os.stat(dependsOn).st_mtime

        if writeCond(cacheId):
            print "\nReading %s ..." % (cacheId,),
        # Mem cache
        if cacheId in memcache:
            memitem = memcache[cacheId]
            if not dependsOn or dependsModTime < memitem['time']:
                if writeCond(cacheId):
                    print "from memcache"
                return memitem['content'], memitem['time']

        # File cache
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None, None

        # out of date check
        if dependsOn and dependsModTime > cacheModTime:
            return None, cacheModTime

        try:
            if not cacheFile in self._locked_files:
                self._locked_files.add(cacheFile)
                filetool.lock(cacheFile)

            fobj = open(cacheFile, 'rb')
            #filetool.lock(fobj.fileno())

            gc.disable()
            try:
                content = pickle.loads(fobj.read().decode('zlib'))
            finally:
                gc.enable()

            #filetool.unlock(fobj.fileno())
            fobj.close()
            if not keepLock:
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)

            if memory:
                memcache[cacheId] = {'content':content, 'time': time.time()}

            #print "read cacheId: %s" % cacheId
            if writeCond(cacheId):
                print "from disk"
            return content, cacheModTime

        except (IOError, EOFError, pickle.PickleError, pickle.UnpicklingError):
            self._console.warn("Could not read cache object %s, recalculating..." % cacheFile)
            return None, cacheModTime
Developer: stefanand, Project: qooxdoo-titanium, Lines: 58, Source: Cache.py

Example 7: copyResource

    def copyResource(res, library):
        if skip_expression.search(os.path.basename(res)):
            return
        sourcepath = os.path.join(library.resourcePath, res)
        targetpath = approot + "/resource/" + res
        filetool.directory(os.path.dirname(targetpath))
        shutil.copy(sourcepath, targetpath)
        #copier = copytool.CopyTool(context.console)
        #args   = ['-x', ','.join(filetool.VERSIONCONTROL_DIR_PATTS), sourcepath, targetpath]
        #copier.parse_args(args)
        #copier.do_work()
        return
Developer: RemiHeugue, Project: qooxdoo, Lines: 12, Source: CodeProvider.py

Example 8: _check_path

    def _check_path(self, path):
        self._console.indent()
        self._console.debug("Checking path '%s'" % path)
        if not os.path.exists(path):
            self._console.debug("Creating non-existing cache directory")
            filetool.directory(path)
            self._update_checkfile()
        elif not os.path.isdir(path):
            raise RuntimeError, "The cache path is not a directory: %s" % path
        else: # it's an existing directory
            # defer read/write access test to the first call of read()/write()
            self._console.debug("Using existing directory")
            pass
        self._console.outdent()
Developer: reneolivo, Project: qooxdoo, Lines: 14, Source: Cache.py

Example 9: write

    def write(self, cacheId, content, memory=False, writeToFile=True):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeToFile:
            try:
                cPickle.dump(content, open(cacheFile, 'wb'), 2)
    
            except (IOError, EOFError, cPickle.PickleError, cPickle.PicklingError):
                self._console.error("Could not store cache to %s" % self._path)
                sys.exit(1)

        if memory:
            memcache[cacheId] = content
Developer: AbhishekJain1, Project: testing-spagobi, Lines: 14, Source: Cache.py

Example 10: _handleResources

def _handleResources(script, generator):

    def createResourceInfo(res, resval):
        resinfo = [ { "target": "resource", "data": { res : resval }} ]
        #filetool.save(approot+"/data/resource/" + res + ".json", json.dumpsCode(resinfo))
        return resinfo

    def copyResource(res, library):
        sourcepath = os.path.join(library['path'], library['resource'], res)
        targetpath = approot + "/resource/" + res
        filetool.directory(os.path.dirname(targetpath))
        shutil.copy(sourcepath, targetpath)
        return

    # ----------------------------------------------------------------------
    approot = context.jobconf.get("provider/app-root", "./provider")
    filetool.directory(approot+"/data")
    filetool.directory(approot+"/resource")
    
    # quick copy of runLogResources, for fast results
    packages   = script.packagesSortedSimple()
    parts      = script.parts
    variants   = script.variants

    allresources = {}
    # get resource info
    # -- the next call is fake, just to populate package.data.resources!
    _ = generator._codeGenerator.generateResourceInfoCode(script, generator._settings, context.jobconf.get("library",[]))
    for packageId, package in enumerate(packages):
        allresources.update(package.data.resources)
    
    resinfos = {}
    for res in allresources:
        # fake a classId-like resourceId ("a.b.c"), for filter matching
        resId = os.path.splitext(res)[0]
        resId = resId.replace("/", ".")
        if passesOutputfilter(resId):
            resinfos[res] = createResourceInfo(res, allresources[res])
            # extract library name space
            if isinstance(allresources[res], types.ListType): # it's an image = [14, 14, u'png', u'qx' [, u'qx/decoration/Modern/checkradio-combined.png', 0, 0]]
                library_ns = allresources[res][3]
            else: # html page etc. = "qx"
                library_ns = allresources[res]
            library    = libraries[library_ns]
            copyResource(res, library)

    filetool.save(approot+"/data/resource/resources.json", json.dumpsCode(resinfos))

    return
Developer: mengu, Project: grooxdoo, Lines: 49, Source: CodeProvider.py

Example 11: _check_path

    def _check_path(self, path):
        self._console.indent()
        self._console.debug("Checking path '%s'" % path)
        if not os.path.exists(path):
            self._console.debug("Creating non-existing cache directory")
            filetool.directory(path)
            self._update_checkfile()
        elif not os.path.isdir(path):
            raise RuntimeError, "The cache path is not a directory: %s" % path
        else: # it's an existing directory
            # defer read/write access test to the first call of read()/write()
            self._console.debug("Using existing directory")
            pass
        if len(os.listdir(path)) < CACHE_THRESHOLD: # not even minimal framework classes cached
            self._console.info("Populating the cache, this may take some time")
        self._console.outdent()
Developer: 6r1d, Project: qooxdoo, Lines: 16, Source: Cache.py

Example 12: read

    def read(self, cacheId, dependsOn=None, memory=False, keepLock=False):
        if dependsOn:
            dependsModTime = os.stat(dependsOn).st_mtime

        # Mem cache
        if cacheId in memcache:
            memitem = memcache[cacheId]
            if not dependsOn or dependsModTime < memitem['time']:
                return memitem['content'], memitem['time']

        # File cache
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None, None

        # out of date check
        if dependsOn and dependsModTime > cacheModTime:
            return None, cacheModTime

        try:
            if not cacheFile in self._locked_files:
                self._locked_files.add(cacheFile)
                filetool.lock(cacheFile)

            fobj = open(cacheFile, 'rb')
            #filetool.lock(fobj.fileno())

            content = pickle.load(fobj)

            #filetool.unlock(fobj.fileno())
            fobj.close()
            if not keepLock:
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)

            if memory:
                memcache[cacheId] = {'content':content, 'time': time.time()}

            return content, cacheModTime

        except (IOError, EOFError, pickle.PickleError, pickle.UnpicklingError):
            self._console.error("Could not read cache from %s" % self._path)
            return None, cacheModTime
Developer: salmon-charles, Project: qooxdoo-build-tool, Lines: 47, Source: Cache.py

Example 13: main

def main():
    apidata = {}
    apidata['type'] = 'doctree'
    apidata['children'] = []
    apidata['attributes'] = {}
    apidata['children'].append({
      "type":"packages","attributes":{},"children":[]  
    })
    filetool.directory(store_path)

    dirwalker = filetool.find(module_root, r'\.py$')

    for pyfile in dirwalker:
        #if os.stat(pyfile).st_size == 0:
        #    continue
        # get the file's api doc as json
        filejson = pyapi2json(pyfile)
        apipackage = file2package(pyfile, module_root)
        # and store it
        filetool.save(store_path+'/'+apipackage+'.json', filejson)
        # make an entry in apidata struct
        levels = apipackage.split('.')
        curr = apidata['children'][0]['children']
        for pos,level in enumerate(levels):
            if level not in (x['attributes']['name'] for x in curr if 'name' in x['attributes']):
                newentry = {
                    "children" : [],
                    "type" : "packages" if pos % 2 else "package",
                    "attributes" : {
                        "packageName" : ".".join(levels[:pos]),
                        "name" : level,
                        "fullName" : ".".join(levels[:pos+1])
                    }
                }
                if pos==len(levels)-1:
                    newentry["externalRef"] = True
                    #del newentry['children']
                    #newentry["type"] = "classes"
                    pass
                curr.append(newentry)
                curr = newentry['children']
            else:
                curr = [x['children'] for x in curr if x['attributes']['name']==level][0]
        

    # store apidata
    filetool.save(store_path+'/'+"apidata.json", json.dumps(apidata))
Developer: 1and1, Project: qooxdoo, Lines: 47, Source: generator_api.py

Example 14: write

    def write(self, cacheId, content, memory=False, writeToFile=True):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeToFile:
            try:
                self._locked_files.add(cacheFile)  # this is not atomic with the next one!
                filetool.lock(cacheFile)
                fobj = open(cacheFile, 'wb')

                pickle.dump(content, fobj, 2)

                fobj.close()
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)  # not atomic with the previous one!

            except (IOError, EOFError, pickle.PickleError, pickle.PicklingError), e:
                e.args = ("Could not store cache to %s\n" % self._path + e.args[0], ) + e.args[1:]
                raise e
Developer: carriercomm, Project: EyeOS, Lines: 19, Source: Cache.py

Example 15: combine

    def combine(self, combined, files, horizontal, type="extension"):
        self._console.indent()
        if horizontal:
            orientation = "x1"
        else:
            orientation = "1x"

        # combine
        config = []
        clips = []
        top = 0
        left = 0
        allfiles = []
        for file in files:
            allfiles.extend(glob.glob(file))
        #self._console.debug("Combining the following images: %r" % allfiles)
        for file in allfiles:
            if not os.path.exists(file):
                self._console.warn("Non-existing file spec, skipping: %s" % file)
                continue
            clips.append(file)
            imginfo = Image(file).getInfoMap()
            width, height = imginfo['width'], imginfo['height']
            config.append({'file':file, 'combined':combined, 'left': -left,
                           'top': -top, 'width':width, 'height':height, 'type':imginfo['type']})
            if horizontal:
                left += width
            else:
                top += height

        if len(clips) == 0:
            self._console.warn("No images to combine; skipping")
        else:
            filetool.directory(os.path.dirname(combined))
            if type == "extension":
                self.combineImgMagick(clips, combined, orientation)
            elif type == "base64":
                self.combineBase64(config)

        self._console.outdent()
        return config
Developer: 1and1, Project: qooxdoo, Lines: 41, Source: ImageClipping.py


Note: The misc.filetool.directory function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors, and the copyright of the source code belongs to the original authors. Please consult the license of the corresponding project before distributing or using the code; do not reproduce without permission.