

Python MobileRestClient.verify_docs_in_changes method code examples

This page collects typical usage examples of the Python method keywords.MobileRestClient.MobileRestClient.verify_docs_in_changes. If you are wondering exactly what MobileRestClient.verify_docs_in_changes does, how to call it, or want to see it used in context, the curated examples below should help. You can also explore other usages of keywords.MobileRestClient.MobileRestClient.


The following presents 15 code examples of the MobileRestClient.verify_docs_in_changes method, sorted by popularity by default.
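Before the individual examples, here is a minimal sketch of the typical call pattern, assuming the mobile-testkit keywords used throughout the examples below; the LiteServ URL, database name, and doc count are placeholders, not values from any particular test:

from keywords.MobileRestClient import MobileRestClient

# Placeholder LiteServ endpoint; adjust to your own topology.
ls_url = "http://localhost:59840"

client = MobileRestClient()
ls_db = client.create_database(url=ls_url, name="ls_db")

# Add a handful of docs, then block until each one appears in the
# database's _changes feed.
docs = client.add_docs(url=ls_url, db=ls_db, number=10, id_prefix="demo_doc")
client.verify_docs_in_changes(url=ls_url, db=ls_db, expected_docs=docs)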

Example 1: test_peer_2_peer_sanity_pull

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_peer_2_peer_sanity_pull(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    4. Add 5000 docs to LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 5000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1

    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(ls_db2_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
Author: couchbaselabs, Project: mobile-testkit, Lines: 42, Source: test_peer_2_peer_sanity.py
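The examples call verify_docs_in_changes but never show its body. As a rough mental model only, the helper below sketches how such a check can be built on the _changes REST endpoint: poll until every expected (id, rev) pair has appeared, or fail after a timeout. The function name, parameters, and the use of HTTP basic auth are assumptions made for illustration, not the actual mobile-testkit implementation.

import time
import requests

def verify_docs_in_changes_sketch(url, db, expected_docs, auth=None,
                                  polling_interval=1, timeout=120):
    """Illustrative only: poll <url>/<db>/_changes until every expected doc id
    (and rev, when one is given) has shown up, or raise after 'timeout' seconds."""
    remaining = {doc["id"]: doc.get("rev") for doc in expected_docs}
    last_seq = 0
    start = time.time()
    while remaining:
        if time.time() - start > timeout:
            raise TimeoutError("Docs missing from changes feed: {}".format(sorted(remaining)))
        resp = requests.get("{}/{}/_changes".format(url, db),
                            params={"since": last_seq, "feed": "normal"},
                            auth=auth)
        resp.raise_for_status()
        body = resp.json()
        for change in body["results"]:
            doc_id = change["id"]
            if doc_id not in remaining:
                continue
            expected_rev = remaining[doc_id]
            found_revs = [c["rev"] for c in change.get("changes", [])]
            # Cross the doc off once it appears (with the expected rev, if any)
            if expected_rev is None or expected_rev in found_revs:
                del remaining[doc_id]
        last_seq = body["last_seq"]
        time.sleep(polling_interval)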

Example 2: test_initial_push_replication

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_initial_push_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare LiteServ to have 10000 documents.
    2. Create a single-shot or continuous push replicator to push the docs into a sync_gateway database.
    3. Verify that all of the docs get pushed.
    """

    sg_db = "db"
    ls_db = "ls_db"
    seth_channels = ["ABC", "NBC"]

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_push_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="password", channels=seth_channels)
    session = client.create_session(sg_one_admin, sg_db, "seth")

    client.create_database(url=ls_url, name=ls_db)

    # Create 'num_docs' docs on LiteServ
    docs = client.add_docs(
        url=ls_url,
        db=ls_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        channels=seth_channels
    )
    assert len(docs) == num_docs

    # Start push replication
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to sync_gateway
    client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    # Verify docs show up in sync_gateway's changes feed
    client.verify_docs_in_changes(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
Author: couchbaselabs, Project: mobile-testkit, Lines: 82, Source: test_replication.py
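The branch above, waiting on 'Idle' for a continuous push versus waiting for the replication to disappear for a one-shot push, recurs across these tests. Below is a hedged sketch of that wait built only on the get_replications call already shown in this example; the helper name and polling constants are made up for illustration:

import time

def wait_for_push_to_settle(client, ls_url, continuous, polling_interval=1, timeout=300):
    """Illustrative only: for a continuous push, poll until every replication
    reports status 'Idle'; for a one-shot push, poll until no replications remain."""
    start = time.time()
    while time.time() - start < timeout:
        replications = client.get_replications(url=ls_url)
        if continuous:
            # Continuous replications never finish; 'Idle' means they are caught up
            if replications and all(r["status"] == "Idle" for r in replications):
                return
        elif not replications:
            # One-shot replications are removed from the list once complete
            return
        time.sleep(polling_interval)
    raise TimeoutError("Replications did not settle within {}s".format(timeout))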

Example 3: test_backfill_channels_looping_longpoll_changes

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_backfill_channels_looping_longpoll_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)

    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Create a dictionary keyed on doc id for all of channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs.keys()) == 50

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    with concurrent.futures.ProcessPoolExecutor() as ex:

        # Start long poll changes feed.
        changes_task = ex.submit(client.get_changes,
                                 url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, timeout=10, limit=20)

        # Grant access to channel "A"
        if grant_type == "CHANNEL-REST":
            log_info("Granting user access to channel A via Admin REST user update")
            # Grant via update to user in Admin API
            client.update_user(url=sg_admin_url, db=sg_db,
                               name=user_b_user_info.name, channels=["A", "B"])

        elif grant_type == "CHANNEL-SYNC":
            log_info("Granting user access to channel A sync function access()")
            # Grant via access() in sync_function, then id 'channel_access' will trigger an access(doc.users, doc.channels)
            access_doc = document.create_doc("channel_access", channels=["A"])
            access_doc["users"] = ["USER_B"]
            client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)

        elif grant_type == "ROLE-REST":
            log_info("Granting user access to channel A via Admin REST role grant")
            # Create role with channel A
            client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
            client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])

        elif grant_type == "ROLE-SYNC":
            log_info("Granting user access to channel A via sync function role() grant")
            # Create role with channel A
            client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])

            # Grant via role() in the sync function; a doc with id 'role_access' will trigger role(doc.users, doc.roles)
            role_access_doc = document.create_doc("role_access")
            role_access_doc["users"] = ["USER_B"]
            role_access_doc["roles"] = ["role:channel-A-role"]
            client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)

        else:
            pytest.fail("Unsupported grant_type!!!!")

        # Block on return of longpoll changes, feed should wake up and return 20 results
        changes = changes_task.result()

    assert len(changes["results"]) == 20
    num_requests = 1

    # Cross the results off from the 'a_docs' dictionary
    for doc in changes["results"]:
#......... part of the code omitted here .........
Author: couchbaselabs, Project: mobile-testkit, Lines: 103, Source: test_changes_backfill.py

Example 4: test_backfill_channels_oneshot_changes

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_backfill_channels_oneshot_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)

    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    # Grant access to channel "A"
    if grant_type == "CHANNEL-REST":
        log_info("Granting user access to channel A via Admin REST user update")
        # Grant via update to user in Admin API
        client.update_user(url=sg_admin_url, db=sg_db,
                           name=user_b_user_info.name, channels=["A", "B"])

    elif grant_type == "CHANNEL-SYNC":
        log_info("Granting user access to channel A sync function access()")
        # Grant via access() in sync_function, then id 'channel_access' will trigger an access(doc.users, doc.channels)
        access_doc = document.create_doc("channel_access", channels=["A"])
        access_doc["users"] = ["USER_B"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)

    elif grant_type == "ROLE-REST":
        log_info("Granting user access to channel A via Admin REST role grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])

    elif grant_type == "ROLE-SYNC":
        log_info("Granting user access to channel A via sync function role() grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])

        # Grant via role() in the sync function; a doc with id 'role_access' will trigger role(doc.users, doc.roles)
        role_access_doc = document.create_doc("role_access")
        role_access_doc["users"] = ["USER_B"]
        role_access_doc["roles"] = ["role:channel-A-role"]
        client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)

    else:
        pytest.fail("Unsupported grant_type!!!!")

    user_b_changes_after_grant = client.get_changes(url=sg_url, db=sg_db,
                                                    since=user_b_changes["last_seq"], auth=user_b_session, feed="normal")

    # User B should have received 51 docs (a_docs + 1 _user/USER_B doc) for a REST grant, or 50 changes if the grant
    # is via the sync function
    changes_results = user_b_changes_after_grant["results"]
    assert 50 <= len(changes_results) <= 51

    # Create a dictionary of id/rev pairs for all docs from changes that are not "_user/" docs
    ids_and_revs_from_user_changes = {
        change["id"]: change["changes"][0]["rev"]
        for change in changes_results if not change["id"].startswith("_user/")
    }

    assert len(ids_and_revs_from_user_changes) == 50

    # Create a dictionary of id/rev pairs for all of the channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
#......... part of the code omitted here .........
Author: couchbaselabs, Project: mobile-testkit, Lines: 103, Source: test_changes_backfill.py

Example 5: test_backfill_channels_oneshot_limit_changes

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_backfill_channels_oneshot_limit_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)

    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    # Grant access to channel "A"
    if grant_type == "CHANNEL-REST":
        log_info("Granting user access to channel A via Admin REST user update")
        # Grant via update to user in Admin API
        client.update_user(url=sg_admin_url, db=sg_db,
                           name=user_b_user_info.name, channels=["A", "B"])

    elif grant_type == "CHANNEL-SYNC":
        log_info("Granting user access to channel A sync function access()")
        # Grant via access() in sync_function, then id 'channel_access' will trigger an access(doc.users, doc.channels)
        access_doc = document.create_doc("channel_access", channels=["A"])
        access_doc["users"] = ["USER_B"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)

    elif grant_type == "ROLE-REST":
        log_info("Granting user access to channel A via Admin REST role grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])

    elif grant_type == "ROLE-SYNC":
        log_info("Granting user access to channel A via sync function role() grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])

        # Grant via role() in the sync function; a doc with id 'role_access' will trigger role(doc.users, doc.roles)
        role_access_doc = document.create_doc("role_access")
        role_access_doc["users"] = ["USER_B"]
        role_access_doc["roles"] = ["role:channel-A-role"]
        client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)

    else:
        pytest.fail("Unsupported grant_type!!!!")

    # Create a dictionary keyed on doc id for all of channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs.keys()) == 50

    log_info("Doing 3, 1 shot changes with limit and last seq!")
    # Issue 3 oneshot changes with a limit of 20

    #################
    # Changes Req #1
    #################
    user_b_changes_after_grant_one = client.get_changes(url=sg_url, db=sg_db,
                                                        since=user_b_changes["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # User B should have received 20 docs due to the limit
    assert len(user_b_changes_after_grant_one["results"]) == 20

    for doc in user_b_changes_after_grant_one["results"]:
        # cross off keys found from 'a_docs' dictionary
#......... part of the code omitted here .........
Author: couchbaselabs, Project: mobile-testkit, Lines: 103, Source: test_changes_backfill.py

Example 6: test_remove_add_channels_to_doc

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_remove_add_channels_to_doc(params_from_base_test_setup, sg_conf_name):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A", "B"], roles=[])
    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])

    admin_user_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=admin_user_info.name,
        password=admin_user_info.password,
        channels=admin_user_info.channels,
    )

    a_user_auth = client.create_user(
        url=sg_admin_url, db=sg_db, name=a_user_info.name, password=a_user_info.password, channels=a_user_info.channels
    )

    a_docs = client.add_docs(
        url=sg_url, db=sg_db, number=50, id_prefix="a_doc", auth=admin_user_auth, channels=admin_user_info.channels
    )

    # Build dictionary of a_docs
    a_docs_id_rev = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(a_docs_id_rev) == 50

    # Wait for all docs to show up in changes
    client.verify_doc_id_in_changes(sg_url, sg_db, expected_doc_id="_user/a_user", auth=a_user_auth)
    client.verify_docs_in_changes(sg_url, sg_db, expected_docs=a_docs, auth=a_user_auth)

    # Get changes for 'a_user'
    a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=a_user_auth, feed="normal")

    # 'a_user' should get 50 'a_doc_*' docs and 1 '_user/a_user' doc
    assert len(a_user_changes["results"]) == 51

    ###########################
    # Remove Channels from doc
    ###########################

    # Copy a_docs_id_rev to dictionary to scratch off values
    remove_docs_scratch_off = a_docs_id_rev.copy()
    assert len(remove_docs_scratch_off) == 50

    # Use admin user to update the docs to remove 'A' from the channels property on the doc and add 'B'
    client.update_docs(url=sg_url, db=sg_db, docs=a_docs, number_updates=1, auth=admin_user_auth, channels=["B"])

    # Longpoll loop is required due to the delay for changes to propagate to the client
    changes_timeout = 10
    start = time.time()
    last_seq = a_user_changes["last_seq"]
    while True:

        # If this takes longer than 10 seconds, fail the test
        if time.time() - start > changes_timeout:
            raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed")

        # We found everything, exit loop!
        if remove_docs_scratch_off == {}:
            log_info("All expected docs found to be removed")
            break

        # Get changes for 'a_user' from last_seq
        a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, timeout=10)
        assert len(a_user_changes["results"]) > 0

        # Loop over the changes found and perform the following
        #   1. Check that the doc is flagged with 'removed'
        #   2. Cross the doc off the 'remove_docs_scratch_off' dictionary
        for change in a_user_changes["results"]:
            assert change["removed"] == ["A"]
            assert change["changes"][0]["rev"].startswith("2-")
            # This will blow up if any change is not found in that dictionary
            del remove_docs_scratch_off[change["id"]]

        # Update last_seq
        last_seq = a_user_changes["last_seq"]

    # Issue a changes request from 'last_seq' and verify that the feed is up to date and returns no results
    a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, feed="normal")
    assert len(a_user_changes["results"]) == 0

    #########################
    # Add Channels to doc
    #########################

#......... part of the code omitted here .........
Author: couchbaselabs, Project: mobile-testkit, Lines: 103, Source: test_channels.py

Example 7: test_longpoll_awaken_roles

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_longpoll_awaken_roles(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    cluster_topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
    sg_url = cluster_topology["sync_gateways"][0]["public"]

    log_info("sg_conf: {}".format(sg_conf))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin_role = "admin_role"
    admin_channel = "admin_channel"

    admin_user_info = userinfo.UserInfo(name="admin", password="pass", channels=[], roles=[admin_role])
    adam_user_info = userinfo.UserInfo(name="adam", password="Adampass1", channels=[], roles=[])
    traun_user_info = userinfo.UserInfo(name="traun", password="Traunpass1", channels=[], roles=[])
    andy_user_info = userinfo.UserInfo(name="andy", password="Andypass1", channels=[], roles=[])
    sg_db = "db"

    client = MobileRestClient()

    # Create a role on sync_gateway
    client.create_role(url=sg_admin_url, db=sg_db, name=admin_role, channels=[admin_channel])

    # Create users; 'admin' gets the admin role, the others have no channels or roles
    admin_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=admin_user_info.name, password=admin_user_info.password, roles=[admin_role])

    adam_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=adam_user_info.name, password=adam_user_info.password)

    traun_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=traun_user_info.name, password=traun_user_info.password)

    andy_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=andy_user_info.name, password=andy_user_info.password)

    ################################
    # change feed wakes for role add
    ################################

    # Get starting sequence of docs, use the last seq to progress past any _user docs.
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    # Add doc with channel associated with the admin role
    admin_doc = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="admin_doc", auth=admin_auth, channels=[admin_channel])
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=admin_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes; the feeds should be
        # caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        adam_auth = client.update_user(url=sg_admin_url, db=sg_db,
                                       name=adam_user_info.name, password=adam_user_info.password, roles=[admin_role])

        traun_auth = client.update_user(url=sg_admin_url, db=sg_db,
                                        name=traun_user_info.name, password=traun_user_info.password, roles=[admin_role])

        andy_auth = client.update_user(url=sg_admin_url, db=sg_db,
                                       name=andy_user_info.name, password=andy_user_info.password, roles=[admin_role])

        adam_changes = adam_changes_task.result()
        assert 1 <= len(adam_changes["results"]) <= 2
        assert adam_changes["results"][0]["id"] == "admin_doc_0" or adam_changes["results"][0]["id"] == "_user/adam"

        traun_changes = traun_changes_task.result()
        assert 1 <= len(traun_changes["results"]) <= 2
        assert traun_changes["results"][0]["id"] == "admin_doc_0" or traun_changes["results"][0]["id"] == "_user/traun"

        andy_changes = andy_changes_task.result()
        assert 1 <= len(andy_changes["results"]) <= 2
        assert andy_changes["results"][0]["id"] == "admin_doc_0" or andy_changes["results"][0]["id"] == "_user/andy"

    # Check that the user docs all show up in changes feed
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth)

    # Check that the admin doc made it to all the changes feeds
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=adam_auth)
#......... part of the code omitted here .........
Author: couchbaselabs, Project: mobile-testkit, Lines: 103, Source: test_longpoll.py

Example 8: test_take_down_bring_up_sg_accel_validate_cbgt

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup, sg_conf):
    """
    Scenario 1

    Start with 3 sg_accels
    Take down 2 sg_accels (block until down -- poll port if needed)
    Doc adds with uuids (~30 sec for cbgt to reshard)
    polling loop: wait for all docs to come back over changes feed
    Call validate pindex with correct number of accels

    Scenario 2 (Continuation)

    When bringing up, you'd have to poll the cbgt_cfg until you get expected number of nodes,
    then you could validate the pindex with 2 accels
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels
    )

    log_info("Shutting down sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2]))
    # Shutdown two accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0

    log_info("Finished taking nodes down!")

    # It should take some time (~30 sec) for cbgt to pick up the failing nodes and reshard the pindexes. During
    # this window, add 1000 docs and start a longpoll changes loop to see if those docs make it to the changes feed.
    # If the reshard is successful, they will show up at some point after. If not, the docs will fail to show up.
    doc_pusher_docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=1000,
        id_prefix=None,
        auth=doc_pusher_auth,
        channels=doc_pusher_user_info.channels
    )
    assert len(doc_pusher_docs) == 1000
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=doc_pusher_docs, auth=doc_pusher_auth, polling_interval=5)

    # The pindexes should be resharded at this point since all of the changes have shown up
    assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1)

    log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2]))

    # Start two accel nodes in parallel
    status = cluster.sg_accels[1].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the first accel node back (2 running)
    assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=2)

    status = cluster.sg_accels[2].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the second accel node back (3 running)
    assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=3)
Author: couchbaselabs, Project: mobile-testkit, Lines: 87, Source: test_multiple_accels.py

Example 9: test_server_goes_down_sanity

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_server_goes_down_sanity(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Starting adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    coucbase_servers = topology["couchbase_servers"]

    cbs_one_url = coucbase_servers[0]
    cbs_two_url = coucbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    sg_user_name = "seth"
    sg_user_password = "password"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    main_server = CouchbaseServer(cbs_one_url)
    flakey_server = CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg, sg_db, sg_user_name)

    # Stop second server
    flakey_server.stop()

    # Try to add 100 docs in a loop until all succeed; if they never do, fail with a timeout
    errors = num_docs

    # Wait 30 seconds for auto failover
    # (Minimum value suggested - http://docs.couchbase.com/admin/admin/Tasks/tasks-nodeFailover.html)
    # + 15 seconds to add docs
    timeout = 45
    start = time.time()

    successful_add = False
    while not successful_add:

        # Fail tests if all docs do not succeed before timeout
        if (time.time() - start) > timeout:
            # Bring server back up before failing the test
            flakey_server.start()
            main_server.rebalance_in(coucbase_servers, flakey_server)
            raise TimeoutError("Failed to successfully put docs before timeout")

        try:
            docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix=None, auth=session, channels=channels)

            # If the above add doc does not throw, it was a successful add.
            successful_add = True
        except requests.exceptions.HTTPError as he:
            log_info("Failed to add docs: {}".format(he))

        log_info("Seeing: {} errors".format(errors))
        time.sleep(1)

    assert len(docs) == 100
    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs, auth=session)

    try:
        client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=session, polling_interval=5)
    except keywords.exceptions.TimeoutException:
        # timeout verifying docs. Bring server back in to restore topology, then fail
        # Failing due to https://github.com/couchbase/sync_gateway/issues/2197
        flakey_server.start()
        main_server.recover(flakey_server)
        main_server.rebalance_in(coucbase_servers, flakey_server)
        raise keywords.exceptions.TimeoutException("Failed to get all changes")

    # Test succeeded without timeout, bring server back into topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(coucbase_servers, flakey_server)
#......... part of the code omitted here .........
Author: couchbaselabs, Project: mobile-testkit, Lines: 103, Source: test_multiple_servers.py

Example 10: test_peer_2_peer_sanity

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_peer_2_peer_sanity(setup_p2p_test):
    """
    Sanity P2P scenario:
    1. Launch LiteServ 1 and LiteServ 2
    2. Create a database on each LiteServ
    3. Start continuous push pull replication from each db to the other
    4. Add docs to each db
    5. Verify the docs show up at each db
    6. Verify the docs show up in the database's changes feed.
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 1000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    # Setup continuous push / pull replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    repl_one = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2
    )

    repl_two = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    # Setup continuous push / pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    repl_three = client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_db=ls_db2,
        to_url=ls_url_one, to_db=ls_db1
    )

    repl_four = client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_url=ls_url_one, from_db=ls_db1,
        to_db=ls_db2
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_one)
    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_two)
    client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_three)
    client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_four)

    ls_url_one_replications = client.get_replications(ls_url_one)
    assert len(ls_url_one_replications) == 2

    ls_url_two_replications = client.get_replications(ls_url_two)
    assert len(ls_url_two_replications) == 2

    ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1, number=num_docs_per_db, id_prefix="test_ls_db1")
    assert len(ls_db1_docs) == num_docs_per_db

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(ls_db2_docs) == num_docs_per_db

    all_docs = client.merge(ls_db1_docs, ls_db2_docs)
    assert len(all_docs) == 2000

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=all_docs)

    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=all_docs)
Author: couchbaselabs, Project: mobile-testkit, Lines: 82, Source: test_peer_2_peer_sanity.py

Example 11: test_rebalance_sanity

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_rebalance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg_one = topology["sync_gateways"][0]["admin"]
    sg_one_url = topology["sync_gateways"][0]["public"]

    cluster_servers = topology["couchbase_servers"]
    cbs_one_url = cluster_servers[0]
    cbs_two_url = cluster_servers[1]

    log_info("Running: 'test_distributed_index_rebalance_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg_one))
    log_info("sg_url: {}".format(sg_one_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    num_updates = 100
    sg_user_name = "seth"
    sg_user_password = "password"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    cb_server = CouchbaseServer(cbs_one_url)
    server_to_remove = CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    with concurrent.futures.ThreadPoolExecutor(5) as executor:

        # Add docs to sg
        log_info("Adding docs to sync_gateway")
        docs = client.add_docs(sg_one_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        # Start updating docs and rebalance out one CBS node
        log_info("Updating docs on sync_gateway")
        update_docs_task = executor.submit(client.update_docs, sg_one_url, sg_db, docs, num_updates, auth=session)

        # Run rebalance in background
        cb_server.rebalance_out(cluster_servers, server_to_remove)

        updated_docs = update_docs_task.result()
        log_info(updated_docs)

    # Verify docs / revisions present
    client.verify_docs_present(sg_one_url, sg_db, updated_docs, auth=session)

    # Verify docs revisions in changes feed
    client.verify_docs_in_changes(sg_one_url, sg_db, updated_docs, auth=session)

    # Rebalance Server back in to the pool
    cb_server.add_node(server_to_remove)
    cb_server.rebalance_in(cluster_servers, server_to_remove)
Author: couchbaselabs, Project: mobile-testkit, Lines: 71, Source: test_multiple_servers.py

Example 12: test_server_goes_down_rebuild_channels

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_server_goes_down_rebuild_channels(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Starting adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    coucbase_servers = topology["couchbase_servers"]

    cbs_one_url = coucbase_servers[0]
    cbs_two_url = coucbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100

    admin_user_info = userinfo.UserInfo(
        name="admin",
        password="password",
        channels=["ABC"],
        roles=[]
    )

    seth_user_info = userinfo.UserInfo(
        name="seth",
        password="password",
        channels=["ABC"],
        roles=[]
    )

    client = MobileRestClient()
    main_server = CouchbaseServer(cbs_one_url)
    flakey_server = CouchbaseServer(cbs_two_url)

    admin_auth = client.create_user(
        admin_sg,
        sg_db,
        admin_user_info.name,
        admin_user_info.password,
        channels=admin_user_info.channels
    )

    client.create_user(
        admin_sg,
        sg_db,
        seth_user_info.name,
        seth_user_info.password,
        channels=seth_user_info.channels
    )
    seth_session = client.create_session(admin_sg, sg_db, seth_user_info.name)

    # allow any user docs to make it to changes
    initial_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_session)

    # push docs from admin
    docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=num_docs,
        id_prefix=None,
        channels=admin_user_info.channels,
        auth=admin_auth
    )

    assert len(docs) == num_docs

    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
    changes_before_failover = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes_before_failover["results"]) == num_docs

    # Stop server via 'service stop'
    flakey_server.stop()

    start = time.time()
    while True:
        # Fail tests if all docs do not succeed before timeout
#......... part of the code omitted here .........
Author: couchbaselabs, Project: mobile-testkit, Lines: 103, Source: test_multiple_servers.py

Example 13: test_take_all_sgaccels_down

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]

#......... part of the code omitted here .........

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 1000

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels
    )

    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])
    client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=a_user_info.name,
        password=a_user_info.password,
        channels=a_user_info.channels
    )
    a_user_session = client.create_session(
        url=sg_admin_url,
        db=sg_db,
        name=a_user_info.name,
        password=a_user_info.password
    )

    # Shutdown all accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:

        # Start adding docs
        docs_1 = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_1_task = ex.submit(client.add_bulk_docs, url=sg_url, db=sg_db, docs=docs_1, auth=doc_pusher_auth)

        # Take down all access nodes
        log_info("Shutting down sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0],
            cluster.sg_accels[1],
            cluster.sg_accels[2]
        ))
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[0].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_3 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0
        assert sg_accel_down_task_3.result() == 0

        # Block until bulk_docs is complete
        doc_push_result_1 = docs_1_task.result()
        assert len(doc_push_result_1) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=doc_push_result_1, auth=doc_pusher_auth)

        # Load sync_gateway with another batch of docs while the sg_accel nodes are offline
        docs_2_bodies = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_push_result_2 = client.add_bulk_docs(url=sg_url, db=sg_db, docs=docs_2_bodies, auth=doc_pusher_auth)
        assert len(docs_push_result_2) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs_push_result_2, auth=doc_pusher_auth)

        # Start loading Sync Gateway with another set of docs while bringing the sg_accel nodes online
        docs_3 = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_3_task = ex.submit(client.add_bulk_docs, url=sg_url, db=sg_db, docs=docs_3, auth=doc_pusher_auth)

        # Bring all the sg_accel nodes back up
        log_info("Starting sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0],
            cluster.sg_accels[1],
            cluster.sg_accels[2]
        ))
        sg_accel_up_task_1 = ex.submit(cluster.sg_accels[0].start, sg_conf)
        sg_accel_up_task_2 = ex.submit(cluster.sg_accels[1].start, sg_conf)
        sg_accel_up_task_3 = ex.submit(cluster.sg_accels[2].start, sg_conf)
        assert sg_accel_up_task_1.result() == 0
        assert sg_accel_up_task_2.result() == 0
        assert sg_accel_up_task_3.result() == 0

        # Wait for pindex to reshard correctly
        assert cluster.validate_cbgt_pindex_distribution_retry(3)

        # Block until second bulk_docs is complete
        doc_push_result_3 = docs_3_task.result()
        assert len(doc_push_result_3) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=doc_push_result_3, auth=doc_pusher_auth)

    # Combine the 3 push results and make sure the changes propagate to a_user
    # a_user has access to the doc's channel.
    log_info("Verifying all the changes show up for 'a_user' ...")
    all_docs = doc_push_result_1 + docs_push_result_2 + doc_push_result_3
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=a_user_session, polling_interval=2)
Author: couchbaselabs, Project: mobile-testkit, Lines: 104, Source: test_multiple_accels.py

Example 14: test_initial_pull_replication

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]
def test_initial_pull_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare sync-gateway to have 10000 documents.
    2. Create a single-shot or continuous pull replicator to pull the docs into a local database.
    3. Verify that all of the docs get pulled.
    Referenced issue: couchbase/couchbase-lite-android#955.
    """

    sg_db = "db"
    ls_db = "ls_db"

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_pull_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="password", channels=["ABC", "NBC"])
    session = client.create_session(sg_one_admin, sg_db, "seth")

    # Create 'num_docs' docs on sync_gateway
    docs = client.add_docs(
        url=sg_one_public,
        db=sg_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        auth=session
    )
    assert len(docs) == num_docs

    client.create_database(url=ls_url, name=ls_db)

    # Start pull replication (single shot or continuous)
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    start = time.time()

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        # Android will report IDLE status, and drop into the 'verify_docs_present' below
        # due to https://github.com/couchbase/couchbase-lite-java-core/issues/1409
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to client
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240)

    all_docs_replicated_time = time.time() - start
    log_info("Replication took: {}s".format(all_docs_replicated_time))

    # Verify docs show up in client's changes feed
    client.verify_docs_in_changes(url=ls_url, db=ls_db, expected_docs=docs)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
Author: couchbaselabs, Project: mobile-testkit, Lines: 89, Source: test_replication.py

Example 15: test_longpoll_awaken_channels

# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import verify_docs_in_changes [as alias]

#......... part of the code omitted here .........
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == doc_id

        # Verify that the changes feed is still listening for Traun and Andy
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update the traun and andy to have one of adam's channels
        update_traun_user_task = ex.submit(client.update_user, url=sg_admin_url, db=sg_db,
                                           name=traun_user_info.name, password=traun_user_info.password, channels=["NBC"])
        traun_auth = update_traun_user_task.result()

        update_andy_user_task = ex.submit(client.update_user, url=sg_admin_url, db=sg_db,
                                          name=andy_user_info.name, password=andy_user_info.password, channels=["ABC"])
        andy_auth = update_andy_user_task.result()

        # Make sure the changes feed wakes up and contains at least one change; 2 are possible if the _user doc is included.
        # Make sure the first change is 'adam_doc'
        traun_changes = traun_changes_task.result()
        assert 1 <= len(traun_changes["results"]) <= 2
        assert traun_changes["results"][0]["id"] == "adam_doc_0" or traun_changes["results"][0]["id"] == "_user/traun"

        andy_changes = andy_changes_task.result()
        assert 1 <= len(andy_changes["results"]) <= 2
        assert andy_changes["results"][0]["id"] == "adam_doc_0" or andy_changes["results"][0]["id"] == "_user/andy"

    # Block until user docs are seen
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth)

    # Make sure that adam's docs show up in changes, since the changes feed may have been woken up by a _user doc above
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=adam_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=traun_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=andy_auth)

    ############################################################
    # changes feed wakes with Channel Removal via Sync function
    ############################################################

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:

        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes; the feeds should be
        # caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Remove the channels property from the doc
        client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=traun_auth, channels=[])

        # All three changes feeds should wake up and return one result
开发者ID:couchbaselabs,项目名称:mobile-testkit,代码行数:70,代码来源:test_longpoll.py


Note: The keywords.MobileRestClient.MobileRestClient.verify_docs_in_changes examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.