

Python ptvsd.wait_for_attach Method Code Examples

This article collects typical usage examples of the Python method ptvsd.wait_for_attach. If you are wondering what exactly ptvsd.wait_for_attach does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from ptvsd, the module this method belongs to.


Eight code examples of ptvsd.wait_for_attach are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
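All eight examples share the same two-step pattern: ptvsd.enable_attach() starts listening for a debugger on a given address, and ptvsd.wait_for_attach() then blocks the process until the IDE connects. The following is a minimal sketch of that pattern using the ptvsd 4.x API; the 127.0.0.1:5678 address and the final break_into_debugger() call are illustrative choices, not taken from any of the examples below. (Note that ptvsd has since been superseded by debugpy, which offers the equivalent debugpy.listen() / debugpy.wait_for_client() pair.)

import ptvsd

# Start listening for an incoming debugger connection.
ptvsd.enable_attach(address=('127.0.0.1', 5678))
print('Waiting for debugger attach...')

# Block until a debugger (e.g. VS Code) attaches to this process.
ptvsd.wait_for_attach()

# Once attached, optionally break straight into the debugger.
ptvsd.break_into_debugger()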

Example 1: start

# Required module: import ptvsd [as alias]
# Or: from ptvsd import wait_for_attach [as alias]
def start():
    # environ, Timer, grpc, ThreadPoolExecutor, threading, os, logger, handlers,
    # spg, ATTACH_DEBUGGER_EVENT and _handle_detached are imported/defined at
    # module level in the source file.
    if environ.get('DEBUGGING'):
        ptvsd.enable_attach(address=(
            '127.0.0.1', int(environ.get('DEBUG_PORT'))))
        print(ATTACH_DEBUGGER_EVENT)
        # Watchdog: give up after debugger_wait_time seconds (default 30) so
        # the process does not hang forever if no debugger attaches.
        t = Timer(int(environ.get("debugger_wait_time", 30)), _handle_detached)
        t.start()
        ptvsd.wait_for_attach()
        t.cancel()
    logger.debug('Starting grpc server..')
    server = grpc.server(ThreadPoolExecutor(max_workers=1))
    p = server.add_insecure_port('127.0.0.1:0')
    handler = handlers.GrpcServiceHandler(server)
    spg.add_RunnerServicer_to_server(handler, server)
    logger.info('Listening on port:{}'.format(p))
    server.start()
    t = threading.Thread(name="listener", target=handler.wait_for_kill_event)
    t.start()
    t.join()
    os._exit(0)
Author: getgauge, Project: gauge-python, Lines: 22, Source: start.py
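Note the Timer around the wait: ptvsd.wait_for_attach() blocks indefinitely, so this runner arms a watchdog that calls _handle_detached after the configured wait time, and cancels it as soon as the debugger actually attaches.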

Example 2: debug_connect

# Required module: import ptvsd [as alias]
# Or: from ptvsd import wait_for_attach [as alias]
def debug_connect():
    # Poll until port 3018 is free: the bind/close probe succeeds only once
    # any previous debug session has released the port.
    connected = False
    while not connected:
        try:
            import socket
            server = socket.socket(proto=socket.IPPROTO_TCP)
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind(('127.0.0.1', 3018))
            server.close()
            connected = True
        except OSError:
            import time
            time.sleep(1)

    import ptvsd
    # Legacy ptvsd 3.x API: the first argument is a secret the attaching
    # client must present.
    ptvsd.enable_attach('SECRET', ('127.0.0.1', 3018))
    ptvsd.wait_for_attach()
    return True
Author: prozum, Project: meson-cmake-wrapper, Lines: 20, Source: util.py
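Despite its name, the connected flag here tracks port availability, not a debugger connection: the function deliberately spins until the hard-coded port is bindable before handing it to ptvsd.enable_attach().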

Example 3: debug_remote

# Required module: import ptvsd [as alias]
# Or: from ptvsd import wait_for_attach [as alias]
def debug_remote(
    file,
    port_num,
    debug_id,
    wait_on_exception,
    redirect_output,
    wait_on_exit,
    break_on_systemexit_zero,
    debug_stdlib,
    run_as
):
    global BREAK_ON_SYSTEMEXIT_ZERO, DEBUG_STDLIB
    BREAK_ON_SYSTEMEXIT_ZERO = break_on_systemexit_zero
    DEBUG_STDLIB = debug_stdlib

    import datetime
    print('%s: Remote launcher starting ptvsd attach wait with File: %s, Port: %d, Id: %s\n' % (datetime.datetime.now(), file, port_num, debug_id))

    ptvsd.enable_attach(debug_id, address=('0.0.0.0', port_num), redirect_output=redirect_output)
    try:
        # Only block in wait_for_attach() when the optional helper module
        # confirms a debugger is trying to attach; otherwise skip the wait.
        import _ptvsdhelper
        if _ptvsdhelper.ping_debugger_for_attach():
            ptvsd.wait_for_attach()
    except ImportError:
        _ptvsdhelper = None

    # Now execute the main file.
    globals_obj = {'__name__': '__main__'}
    if run_as == 'module':
        vspd.exec_module(file, globals_obj)
    elif run_as == 'code':
        vspd.exec_code(file, '<string>', globals_obj)
    else:
        vspd.exec_file(file, globals_obj)

# Arguments are: port, debug id, then the normal arguments, which should
# include a filename to execute.

# Change to the directory we are expected to start from.
Author: ms-iot, Project: iot-utilities, Lines: 40, Source: visualstudio_py_remote_launcher.py
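This launcher binds on 0.0.0.0 so a Visual Studio instance on another machine can attach, and only blocks in wait_for_attach() when the optional _ptvsdhelper module reports that a debugger is actually trying to connect; the target script then runs under the debugger via the vspd.exec_* helpers.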

Example 4: __post_init__

# Required module: import ptvsd [as alias]
# Or: from ptvsd import wait_for_attach [as alias]
def __post_init__(self):
    if os.path.exists(self.output_dir) and os.listdir(
            self.output_dir) and self.do_train and not self.overwrite_output_dir:
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                self.output_dir))

    # Set up distant debugging if needed
    if self.server_ip and self.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(self.server_ip, self.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Set up CUDA, GPU & distributed training
    if self.local_rank == -1 or self.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not self.no_cuda else "cpu")
        self.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(self.local_rank)
        device = torch.device("cuda", self.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        self.n_gpu = 1
    self.device = device
    self.output_mode = "classification"

    self.train_batch_size = self.per_gpu_train_batch_size * max(1, self.n_gpu)
    self.eval_batch_size = self.per_gpu_eval_batch_size * max(1, self.n_gpu)
    self._tokenizer = None
    self._model = None
    self._data_cache = None
    self.train_started = None
Author: paperswithcode, Project: axcell, Lines: 35, Source: transfo_experiment.py
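The server_ip/server_port block here is the standard distant-debugging idiom from the VS Code docs (it reappears in Examples 6 and 7): when both values are set, what appears to be a dataclass-based experiment config pauses during construction until the IDE attaches over the network, before CUDA or distributed training is initialized.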

Example 5: __init__

# Required module: import ptvsd [as alias]
# Or: from ptvsd import wait_for_attach [as alias]
def __init__(self, organization, base_url, date_format, request_date_format=None, **kwargs):
    if config.debug:
        try:
            import ptvsd
            ptvsd.enable_attach(address=('0.0.0.0', 5860))
        except:
            # attach already enabled
            pass
        # Only block if no debugger is connected yet; repeated spider
        # instantiations then skip the wait.
        if not ptvsd.is_attached():
            ptvsd.wait_for_attach()

    self.organization = organization
    # date_format is the string that specifies the date style of the target website
    if request_date_format is None:
        request_date_format = date_format

    self.jobid = kwargs['_job'] if '_job' in kwargs else None

    self.session = HttpUtils.get_session()

    self.date_format = date_format
    self.time_utils = TimeUtils(date_format)
    self.base_url = base_url
    self.identifier = re.sub(r'\W', '', base_url)
    self.event_manager = EventManager()

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    self.memory_handler = logging.handlers.MemoryHandler(0)
    self.memory_handler.setFormatter(formatter)

    self.stream_handler = logging.StreamHandler()
    self.stream_handler.setFormatter(formatter)

    self.configure_logger(self.name, self.memory_handler, logging.INFO)
    self.configure_logger(self.name, self.stream_handler, logging.INFO)
    self.configure_logger('scrapy', self.memory_handler, logging.WARNING)
    self.configure_logger('scrapy', self.stream_handler, logging.WARNING)
    self.configure_logger('twisted', self.memory_handler, logging.WARNING)
    self.configure_logger('twisted', self.stream_handler, logging.WARNING)

    start_date = datetime.now().strftime('%m-%d-%Y')
    end_date = (datetime.now() + relativedelta(months=+1)).strftime('%m-%d-%Y')

    request_format_utils = TimeUtils('%m-%d-%Y')
    # When this is running for multiple days, validating if the date is in the past causes issues
    self.start_date = request_format_utils.convert_date_format(start_date, request_date_format, validate_past=False)
    self.end_date = request_format_utils.convert_date_format(end_date, request_date_format, validate_past=False)
    self.start_timestamp = request_format_utils.min_timestamp_for_day(start_date)
    self.end_timestamp = request_format_utils.max_timestamp_for_day(end_date)

    if not config.bypass_auth:
        self.token = self.session.post(config.login, json={'email': config.scraper_username, 'password': config.scraper_password}).content.decode()
    else:
        self.token = None
Author: In2ItChicago, Project: In2ItChicago, Lines: 57, Source: aggregator_base.py
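The try/except around enable_attach() plus the ptvsd.is_attached() check make the debug hookup idempotent: a scraper process that constructs several spider instances enables attach once, waits once, and lets subsequent instances pass straight through.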

Example 6: prepare

# Required module: import ptvsd [as alias]
# Or: from ptvsd import wait_for_attach [as alias]
def prepare(args):
    # Set up distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    os.makedirs(args.output_dir, exist_ok=True)
    json.dump(args.__dict__, open(os.path.join(
        args.output_dir, 'train_opt.json'), 'w'), sort_keys=True, indent=2)

    # Set up CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Set up logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    logger.info("Training/evaluation parameters %s", args)

    # Before touching any models, ensure torch.einsum runs in fp16 when
    # args.fp16 is set; otherwise amp defaults to "promote" mode and einsum
    # falls back to fp32. Running with --fp16_opt_level="O2" would make this
    # unnecessary, but the code is still valid.
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, 'einsum')
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
Author: microsoft, Project: unilm, Lines: 51, Source: run_seq2seq.py
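Here the debugger wait sits at the very top of prepare(), before output directories, CUDA devices, or the distributed backend are touched, so breakpoints can be hit from the first line of real work onward.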

Example 7: generic_train

# Required module: import ptvsd [as alias]
# Or: from ptvsd import wait_for_attach [as alias]
def generic_train(model, args):
    # Seed everything for reproducibility
    set_seed(args)

    # Set up distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))

    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=5
    )

    train_params = dict(
        accumulate_grad_batches=args.gradient_accumulation_steps,
        gpus=args.n_gpu,
        max_epochs=args.num_train_epochs,
        early_stop_callback=False,
        gradient_clip_val=args.max_grad_norm,
        checkpoint_callback=checkpoint_callback,
    )

    if args.fp16:
        train_params["use_amp"] = args.fp16
        train_params["amp_level"] = args.fp16_opt_level

    if args.n_tpu_cores > 0:
        global xm
        import torch_xla.core.xla_model as xm

        train_params["num_tpu_cores"] = args.n_tpu_cores
        train_params["gpus"] = 0

    if args.n_gpu > 1:
        train_params["distributed_backend"] = "ddp"

    trainer = pl.Trainer(**train_params)

    if args.do_train:
        trainer.fit(model)

    return trainer
Author: bhoov, Project: exbert, Lines: 51, Source: transformer_base.py
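The same idiom again, this time in a PyTorch Lightning training harness; because wait_for_attach() runs before pl.Trainer is even constructed, the debugger is in place before any distributed workers are spawned.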

Example 8: RunWithArgs

# Required module: import ptvsd [as alias]
# Or: from ptvsd import wait_for_attach [as alias]
def RunWithArgs(
  main: Callable[[List[str]], None], argv: Optional[List[str]] = None,
):
  """Begin executing the program.

  Args:
    main: The main function to execute. It takes a single argument "argv",
      which is a list of command line arguments with parsed flags removed.
      If it returns an integer, it is used as the process's exit code.
    argv: A non-empty list of the command line arguments including program name,
      sys.argv is used if None.
  """

  def DoMain(argv):
    """Run the user-provided main method, with app-level arg handling."""
    if FLAGS.version:
      print(GetVersionInformationString())
      sys.exit(0)
    elif FLAGS.dump_flags:
      print(FlagsToString())
      sys.exit(0)
    elif FLAGS.dump_flags_to_json:
      print(
        json.dumps(
          FlagsToDict(), sort_keys=True, indent=2, separators=(",", ": ")
        )
      )
      sys.exit(0)

    # Optionally wait for the user to attach a Visual Studio (Code) debugger.
    # This requires additional configuration of the IDE.
    # See: https://stackoverflow.com/a/61367381
    if FLAGS.ptvsd:
      import ptvsd

      ptvsd.enable_attach(address=("localhost", 5724), redirect_output=True)
      Log(1, "Waiting to attach VS Code debugger on port 5724 ...")
      ptvsd.wait_for_attach()
      Log(1, "Debugger attached. Resuming ...")

    main(argv)

  try:
    absl_app.run(DoMain, argv=argv)
  except KeyboardInterrupt:
    FlushLogs()
    sys.stdout.flush()
    sys.stderr.flush()
    print("keyboard interrupt")
    sys.exit(1)
Author: ChrisCummins, Project: clgen, Lines: 52, Source: app.py
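Unlike the environment-variable-driven examples above, this app wrapper gates the attach wait behind a --ptvsd command-line flag, so any program built on it can opt into debugging per run; since the port (5724) is fixed, the IDE's attach configuration only needs to be set up once.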


Note: the ptvsd.wait_for_attach method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.