

Python sys.exit Method Code Examples

This article collects typical usage examples of the os.sys.exit method in Python. If you are wondering how to use sys.exit, how it is typically called, or what real-world examples of it look like, the curated snippets below may help. You can also explore further usage examples of the os.sys module to which the method belongs.


A total of 14 code examples of the sys.exit method are shown below, sorted by popularity by default.
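Before the collected examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of the import style they all share: the os module imports sys internally, so "from os import sys" binds the same module object as "import sys", and sys.exit(status) raises SystemExit with the given status code.

# Minimal, hypothetical sketch -- not from any of the projects below.
# os imports sys internally, so "from os import sys" binds the same module
# object as "import sys"; sys.exit(status) raises SystemExit with that status.
from os import sys

def main():
    if len(sys.argv) < 2:
        print('usage: example.py <input-file>')
        sys.exit(1)   # non-zero status: report failure to the calling shell
    print('processing ' + sys.argv[1])
    sys.exit(0)       # explicit success exit; equivalent to simply returning

if __name__ == '__main__':
    main()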

Example 1: processZip

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def processZip( sDirectoryName, sZipfilename, datfilename, indexlist ):
    sBasename = sZipfilename.replace('.zip', '')
    thiszip = zipfile.ZipFile( sDirectoryName + '/' + sZipfilename )
    datafile = open( sDirectoryName + '/' + datfilename + '~', 'wb' )
    namelist = thiszip.namelist()
    for index in indexlist:
        print( 'index: ' + str(index) )
#    for name in thiszip.namelist():
        name = namelist[index + 1] # skip name of directory
        print( name )
        if name.endswith('.sgf'):
            contents = thiszip.read( name )
            #print( contents )
            walkthroughSgf( datafile, contents )
    #            sys.exit(-1)
                #break
        else:
            print('not sgf: [' + name + ']' )
            sys.exit(-1)
    datafile.write('END')
    datafile.close()
    os.rename( sDirectoryName + '/' + datfilename + '~', sDirectoryName + '/' + datfilename ) 
Author: hughperkins | Project: kgsgo-dataset-preprocessor | Lines: 24 | Source: kgs_dataset_preprocessor.py

Example 2: downloadFiles

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def downloadFiles( sTargetDirectory ):
    fileInfos = index_processor.get_fileInfos( sTargetDirectory )
    urlsToDo = []
    for fileinfo in fileInfos:
        url = fileinfo['url']
        print( url )
        sFilename = fileinfo['filename']
        if not os.path.isfile( sTargetDirectory + '/' + sFilename ):
            urlsToDo.append( ( url, sTargetDirectory + '/' + sFilename ) )
            print( 'to do: ' + url + ' ... ' )
    pool = multiprocessing.Pool( processes = 16 )
    try:
        it = pool.imap( worker, urlsToDo,  )
        for i in it:
            # print( i )
            pass
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        print( "Caught KeyboardInterrupt, terminating workers" )
        pool.terminate()
        pool.join()
        sys.exit(-1) 
Author: hughperkins | Project: kgsgo-dataset-preprocessor | Lines: 25 | Source: zip_downloader.py

Example 3: main_mulprocessing

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def main_mulprocessing(min_id=None,
                       max_id=None,
                       window_size=5000,
                       number_of_processes=4,
                       drop_first=False):
    engine = create_engine(
        URL(**CONF['database']['connect_args']),
        pool_size=1,
        pool_recycle=CONF['database']['pool_recycle'],
        client_encoding='utf8')
    init_tables(engine, drop_first)
    try:
        manager = ParserManager(
            min_id=min_id,
            max_id=max_id,
            window_size=window_size,
            number_of_processes=number_of_processes)
        manager.start()
        manager.join()
    except (KeyboardInterrupt, SystemExit):
        logger.info('interrupt signal received')
        sys.exit(1)
    except Exception as e:
        raise e 
Author: IUNetSci | Project: hoaxy-backend | Lines: 26 | Source: new_tweet_parser.py

Example 4: zipsToDats

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def zipsToDats( sTargetDirectory, samplesList, name ):
    iCount = 0
    zipNames = set()
    indexesByZipName = {}
    for sample in samplesList:
        (filename,index) = sample
        zipNames.add( filename )
        if not indexesByZipName.has_key( filename ):
           indexesByZipName[filename] = []
        indexesByZipName[filename].append( index )
    print( 'num zips: ' + str( len( zipNames ) ) )

    zipsToDo = []
    for zipName in zipNames:
        sBasename = zipName.replace('.zip','')
        sDatFilename = sBasename + name + '.dat'
        if not os.path.isfile( sTargetDirectory + '/' + sDatFilename ):
            zipsToDo.append( ( sTargetDirectory, zipName, sDatFilename, indexesByZipName[zipName] ) )        

    cores = multiprocessing.cpu_count()
    pool = multiprocessing.Pool( processes = cores )
#    pool.map( loadAllSgfs, dirsToDo )
    p = pool.map_async( worker, zipsToDo )
    try:
        results = p.get(0xFFFF)
#        pool.close()
#        pool.join()
    except KeyboardInterrupt:
        print( "Caught KeyboardInterrupt, terminating workers" )
        pool.terminate()
        pool.join()
        sys.exit(-1) 
Author: hughperkins | Project: kgsgo-dataset-preprocessor | Lines: 34 | Source: kgs_dataset_preprocessor.py

Example 5: createSingleDat

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def createSingleDat( targetDirectory, name, samples ):
    print( 'creating consolidated .dat...' )

    filePath = targetDirectory + '/kgsgo-' + name + '.dat'    
    if os.path.isfile( filePath ):
        print( 'consolidated file ' + filePath + ' already exists :-)' )
        return

    # first check if we have all files
    # first need to get the names of all files
    datfilesNeeded = set()
    for sample in samples:
        (filename, index ) = sample
        datfilesNeeded.add( filename )
    print( 'total dat files to be consolidated: ' + str( len( datfilesNeeded ) ) )
    datfilenames = []
    for zipfilename in datfilesNeeded:
        datfilename = zipfilename.replace('.zip','') + name + '.dat'
        datfilenames.append(datfilename)
    allfilespresent = True
    for datfilename in datfilenames:
        if not os.path.isfile( targetDirectory + '/' + datfilename ):
            allfilespresent = False
            print( 'Missing dat file: ' + datfilename )
            sys.exit(-1)

    consolidatedfile = open( filePath + '~', 'wb' )
    for filename in datfilenames:
        print( 'reading from ' + filename + ' ...' )
        filepath = targetDirectory + '/' + filename
        singledat = open( filepath, 'rb' )
        data = singledat.read()
        if data[-3:] != 'END':
            print( 'Invalid file, doesnt end with END: ' + filepath )
            sys.exit(-1)
        consolidatedfile.write( data[:-3] )
        singledat.close()
    consolidatedfile.write( 'END' )
    consolidatedfile.close()
    os.rename( filePath + '~', filePath ) 
Author: hughperkins | Project: kgsgo-dataset-preprocessor | Lines: 42 | Source: kgs_dataset_preprocessor.py

Example 6: zipsToDats

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def zipsToDats( sTargetDirectory, samplesList, name ):
    iCount = 0
    zipNames = set()
    indexesByZipName = {}
    for sample in samplesList:
        (filename,index) = sample
        zipNames.add( filename )
        if not indexesByZipName.has_key( filename ):
           indexesByZipName[filename] = []
        indexesByZipName[filename].append( index )
    print( 'num zips: ' + str( len( zipNames ) ) )

    zipsToDo = []
    for zipName in zipNames:
        sBasename = zipName.replace('.zip','')
        sDatFilename = sBasename + name + '-v2.dat'
        if not os.path.isfile( sTargetDirectory + '/' + sDatFilename ):
            zipsToDo.append( ( sTargetDirectory, zipName, sDatFilename, indexesByZipName[zipName] ) )        

    cores = multiprocessing.cpu_count()
    pool = multiprocessing.Pool( processes = cores )
#    pool.map( loadAllSgfs, dirsToDo )
    p = pool.map_async( worker, zipsToDo )
    try:
        results = p.get(0xFFFF)
#        pool.close()
#        pool.join()
    except KeyboardInterrupt:
        print( "Caught KeyboardInterrupt, terminating workers" )
        pool.terminate()
        pool.join()
        sys.exit(-1) 
Author: hughperkins | Project: kgsgo-dataset-preprocessor | Lines: 34 | Source: kgs_dataset_preprocessor_v2.py

Example 7: get_params

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def get_params():
    parser = get_params_parser()
    args = parser.parse_args()

    if not args.org or not args.token:
        parser.error("token and org params must be provided.")
        sys.exit(1)

    return args 
Author: chaoss | Project: grimoirelab-elk | Lines: 11 | Source: gh2k.py

Example 8: map_to_workers

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def map_to_workers(self, name, samples):
        '''
        Determine the list of zip files that need to be processed, then map load
        to number of available CPUs.
        '''
        # Create a dictionary with file names as keys and games as values
        zip_names = set()
        indices_by_zip_name = {}
        for filename, index in samples:
            zip_names.add(filename)
            if filename not in indices_by_zip_name:
                indices_by_zip_name[filename] = []
            indices_by_zip_name[filename].append(index)
        print('>>> Number of zip files: ' + str(len(zip_names)))

        # Transform the above dictionary to a list that can be processed in parallel
        zips_to_process = []
        for zip_name in zip_names:
            base_name = zip_name.replace('.tar.gz', '')
            data_file_name = base_name + name
            if not os.path.isfile(self.data_dir + '/' + data_file_name):
                zips_to_process.append((self.__class__, self.data_dir, self.num_planes, zip_name,
                                        data_file_name, indices_by_zip_name[zip_name]))

        # Determine number of CPU cores and split work load among them
        cores = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(processes=cores)
        p = pool.map_async(worker, zips_to_process)
        try:
            results = p.get(0xFFFF)
            print(results)
        except KeyboardInterrupt:
            print("Caught KeyboardInterrupt, terminating workers")
            pool.terminate()
            pool.join()
            sys.exit(-1) 
Author: maxpumperla | Project: betago | Lines: 38 | Source: base_processor.py

Example 9: _download_pretrained_model

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def _download_pretrained_model(prompt=True):
        """Downloads the pre-trained BIST model if non-existent."""
        model_info_exists = path.isfile(IntentExtractionApi.pretrained_model_info)
        model_exists = path.isfile(IntentExtractionApi.pretrained_model)
        if not model_exists or not model_info_exists:
            print(
                "The pre-trained models to be downloaded for the intent extraction dataset "
                "are licensed under Apache 2.0. By downloading, you accept the terms "
                "and conditions provided by the license"
            )
            makedirs(IntentExtractionApi.model_dir, exist_ok=True)
            if prompt is True:
                agreed = IntentExtractionApi._prompt()
                if agreed is False:
                    sys.exit(0)
            download_unlicensed_file(
                "https://s3-us-west-2.amazonaws.com/nlp-architect-data" "/models/intent/",
                "model_info.dat",
                IntentExtractionApi.pretrained_model_info,
            )
            download_unlicensed_file(
                "https://s3-us-west-2.amazonaws.com/nlp-architect-data" "/models/intent/",
                "model.h5",
                IntentExtractionApi.pretrained_model,
            )
            print("Done.") 
Author: NervanaSystems | Project: nlp-architect | Lines: 28 | Source: intent_extraction_api.py

Example 10: _download_pretrained_model

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def _download_pretrained_model(self, prompt=True):
        """Downloads the pre-trained BIST model if non-existent."""
        model_exists = path.isfile(self.pretrained_model)
        model_info_exists = path.isfile(self.pretrained_model_info)
        if not model_exists or not model_info_exists:
            print(
                "The pre-trained models to be downloaded for the NER dataset "
                "are licensed under Apache 2.0. By downloading, you accept the terms "
                "and conditions provided by the license"
            )
            makedirs(self.model_dir, exist_ok=True)
            if prompt is True:
                agreed = NerApi._prompt()
                if agreed is False:
                    sys.exit(0)
            download_unlicensed_file(
                "https://s3-us-west-2.amazonaws.com/nlp-architect-data" "/models/ner/",
                "model_v4.h5",
                self.pretrained_model,
            )
            download_unlicensed_file(
                "https://s3-us-west-2.amazonaws.com/nlp-architect-data" "/models/ner/",
                "model_info_v4.dat",
                self.pretrained_model_info,
            )
            print("Done.") 
Author: NervanaSystems | Project: nlp-architect | Lines: 28 | Source: ner_api.py

Example 11: walkthroughSgf

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def walkthroughSgf( datafile, sgfContents ):
    sgf = gomill.sgf.Sgf_game.from_string( sgfContents )
    # print sgf
    if sgf.get_size() != 19:
        print( 'boardsize not 19, ignoring' )
        return
    goBoard = GoBoard.GoBoard(19)
    doneFirstMove = False
    if sgf.get_handicap() != None and sgf.get_handicap() != 0:
        #print 'handicap not zero, ignoring (' + str( sgf.get_handicap() ) + ')'
        #handicappoints = gomill.handicap_layout.handicap_points( sgf.get_handicap(), 19 )
        numhandicap = sgf.get_handicap()
        #print sgf.get_root().get_setup_stones()
        #sys.exit(-1)
        #for move in getHandicapPoints( numhandicap ):
        for set in sgf.get_root().get_setup_stones():
            #print set
            for move in set:
                #print move
                goBoard.applyMove( 'b', move )
        #sys.exit(-1)
#        print( 'handicap: ' + str(numhandicap) )
        doneFirstMove = True
        #sys.exit(-1)
    moveIdx = 0
    for it in sgf.main_sequence_iter():
        (color,move) = it.get_move()
#        print( 'color ' + str(color) )
#        print( move )
        if color != None and move != None:
            (row,col) = move
            if doneFirstMove and datafile != None:
                addToDataFile( datafile, color, move, goBoard )
            #print 'applying move ' + str( moveIdx )
            goBoard.applyMove( color, (row,col) )
            #print goBoard
            moveIdx = moveIdx + 1
            doneFirstMove = True
            #if moveIdx >= 120:
            #    sys.exit(-1)
    #print( goBoard )
    #print 'winner: ' + sgf.get_winner() 
Author: hughperkins | Project: kgsgo-dataset-preprocessor | Lines: 44 | Source: kgs_dataset_preprocessor.py

Example 12: walkthroughSgf

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def walkthroughSgf( countOnly, datafile, sgfContents ):
    sgf = gomill.sgf.Sgf_game.from_string( sgfContents )
    # print sgf
    if sgf.get_size() != 19:
        print( 'boardsize not 19, ignoring' )
        return
    goBoard = GoBoard.GoBoard(19)
    doneFirstMove = False
    if sgf.get_handicap() != None and sgf.get_handicap() != 0:
        #print 'handicap not zero, ignoring (' + str( sgf.get_handicap() ) + ')'
        #handicappoints = gomill.handicap_layout.handicap_points( sgf.get_handicap(), 19 )
        numhandicap = sgf.get_handicap()
        #print sgf.get_root().get_setup_stones()
        #sys.exit(-1)
        #for move in getHandicapPoints( numhandicap ):
        for set in sgf.get_root().get_setup_stones():
            #print set
            for move in set:
                #print move
                goBoard.applyMove( 'b', move )
        #sys.exit(-1)
#        print( 'handicap: ' + str(numhandicap) )
        doneFirstMove = True
        #sys.exit(-1)
    # first, count number of moves...
    if countOnly:
        numMoves = 0
        countDoneFirstMove = doneFirstMove
        for it in sgf.main_sequence_iter():
            (color,move) = it.get_move()
            if color != None and move != None:
                #(row,col) = move
                if countDoneFirstMove:
                    numMoves = numMoves + 1
                    #addToDataFile( datafile, color, move, goBoard )
                countDoneFirstMove = True
        return numMoves
    #writeFileHeader( datafile, numMoves, 7, 19, 'int', 1 )
    moveIdx = 0
    for it in sgf.main_sequence_iter():
        (color,move) = it.get_move()
        if color != None and move != None:
            (row,col) = move
            if doneFirstMove:
                addToDataFile( datafile, color, move, goBoard )
            goBoard.applyMove( color, (row,col) )
            moveIdx = moveIdx + 1
            doneFirstMove = True

# unzip the zip, process the sgfs to .dat, and remove the unzipped directory 
Author: hughperkins | Project: kgsgo-dataset-preprocessor | Lines: 52 | Source: kgs_dataset_preprocessor_v2.py

Example 13: createSingleDat

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def createSingleDat( targetDirectory, name, samples ):
    print( 'creating consolidated .dat...' )

    filePath = targetDirectory + '/kgsgo-' + name + '-v2.dat'    
    if os.path.isfile( filePath ):
        print( 'consolidated file ' + filePath + ' already exists :-)' )
        return

    # first check if we have all files
    # first need to get the names of all files
    # also, need to count total number of records
    numRecords = 0
    datfilesNeeded = set()
    for sample in samples:
        (filename, index ) = sample
        datfilesNeeded.add( filename )
    print( 'total dat files to be consolidated: ' + str( len( datfilesNeeded ) ) )
    datfilenames = []
    for zipfilename in datfilesNeeded:
        datfilename = zipfilename.replace('.zip','') + name + '-v2.dat'
        datfilenames.append(datfilename)
    allfilespresent = True
    for datfilename in datfilenames:
        if not os.path.isfile( targetDirectory + '/' + datfilename ):
            allfilespresent = False
            print( 'Missing dat file: ' + datfilename )
            sys.exit(-1)
        childdatfile = open( targetDirectory + '/' + datfilename, 'rb' )
        header = childdatfile.read(1024)
        thisN = int( header.split('-n=')[1].split('-')[0] )
        childdatfile.close()
        numRecords = numRecords + thisN
        print( 'child ' + datfilename + ' N=' + str(thisN) )

    consolidatedfile = open( filePath + '~', 'wb' )
    writeFileHeader( consolidatedfile, numRecords, 7, 19, 'int', 1 )
    for filename in datfilenames:
        print( 'reading from ' + filename + ' ...' )
        filepath = targetDirectory + '/' + filename
        singledat = open( filepath, 'rb' )
        # first, skip header
        singledat.read(1024)
        data = singledat.read()
        if data[-3:] != 'END':
            print( 'Invalid file, doesnt end with END: ' + filepath )
            sys.exit(-1)
        consolidatedfile.write( data[:-3] )
        singledat.close()
    consolidatedfile.write( 'END' )
    consolidatedfile.close()
    os.rename( filePath + '~', filePath ) 
Author: hughperkins | Project: kgsgo-dataset-preprocessor | Lines: 53 | Source: kgs_dataset_preprocessor_v2.py

Example 14: consolidate_games

# Required import: from os import sys [as alias]
# Or: from os.sys import exit [as alias]
def consolidate_games(self, name, samples):
        print('>>> Creating consolidated .dat...')
        file_path = self.data_dir + '/kgsgo_' + name
        if os.path.isfile(file_path):
            print('>>> File ' + file_path + ' already exists')
            return

        files_needed = set(file_name for file_name, index in samples)
        print('>>> Total dat files to be consolidated: ' + str(len(files_needed)))

        # Collect names of data files
        data_file_names = []
        for zip_file_name in files_needed:
            data_file_name = zip_file_name.replace('.tar.gz', '') + name
            data_file_names.append(data_file_name)

        # Count total number of moves
        num_records = 0
        for data_file_name in data_file_names:
            if not os.path.isfile(self.data_dir + '/' + data_file_name):
                print('>>> Missing file: ' + data_file_name)
                sys.exit(-1)
            child = open(self.data_dir + '/' + data_file_name, 'rb')
            header = child.read(1024)
            this_n = int(header.split('-n=')[1].split('-')[0])
            child.close()
            num_records = num_records + this_n

        # Write content to consolidate file
        consolidated_file = open(file_path, 'wb')
        self.write_file_header(consolidated_file, num_records, self.num_planes, 19, 1)
        for filename in data_file_names:
            print('>>> Reading from ' + filename + ' ...')
            file_path = self.data_dir + '/' + filename
            single_dat = open(file_path, 'rb')
            single_dat.read(1024)
            data = single_dat.read()
            if data[-3:] != 'END':
                raise Exception('Invalid file, doesnt end with END: ' + file_path)
            consolidated_file.write(data[:-3])
            single_dat.close()
        consolidated_file.write('END')
        consolidated_file.close() 
Author: maxpumperla | Project: betago | Lines: 45 | Source: base_processor.py


Note: The os.sys.exit method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not republish without permission.