import pathlib

import requests

from .utils import *
from .assertions import assert_http_ok

N_PEERS = 5
N_COLLECTIONS = 20


def test_many_collections(tmp_path: pathlib.Path):
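    """Create many collections across a multi-peer cluster.

    Boots an N_PEERS-node cluster, creates N_COLLECTIONS single-shard
    collections while rotating the peer that receives each creation
    request, then verifies every collection becomes active on all peers.
    """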
    assert_project_root()
    peer_dirs = make_peer_folders(tmp_path, N_PEERS)

    # Gather the REST API URIs of all peers as they start
    peer_api_uris = []

    # Start bootstrap
    (bootstrap_api_uri, bootstrap_uri) = start_first_peer(
        peer_dirs[0], "peer_0_0.log")
    peer_api_uris.append(bootstrap_api_uri)

    # Wait for leader
    leader = wait_peer_added(bootstrap_api_uri)

    # Start other peers
    for i in range(1, len(peer_dirs)):
        peer_api_uris.append(start_peer(
            peer_dirs[i], f"peer_0_{i}.log", bootstrap_uri))

    # Wait for cluster
    wait_for_uniform_cluster_status(peer_api_uris, leader)

    # Check that no collections exist on any peer yet
    for uri in peer_api_uris:
        r = requests.get(f"{uri}/collections")
        assert_http_ok(r)
        assert len(r.json()["result"]["collections"]) == 0

    # Create N_COLLECTIONS collections, rotating which peer receives each request
    for i in range(1, N_COLLECTIONS + 1):
        peer = peer_api_uris[i % N_PEERS]
        print(f"creating test_collection_{i} on {peer}")
        r = requests.put(
            f"{peer}/collections/test_collection_{i}", json={
                "vectors": {
                    "size": 4,
                    "distance": "Dot"
                },
                "shard_number": 1  # single shard
            })
        assert_http_ok(r)

    # Check that all collections exist and are active on all peers
    for i in range(1, N_COLLECTIONS + 1):
        wait_collection_exists_and_active_on_all_peers(
            collection_name=f"test_collection_{i}",
            peer_api_uris=peer_api_uris,
        )
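

# For reference, each creation request above corresponds to a REST call like
# the following (host and port are illustrative; the real URIs come from the
# started peers):
#
#   curl -X PUT "http://localhost:6333/collections/test_collection_1" \
#        -H "Content-Type: application/json" \
#        -d '{"vectors": {"size": 4, "distance": "Dot"}, "shard_number": 1}'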