#!/usr/bin/env python
import json
import sys

# pip install websocket-client
import websocket

class ModelClient(object):
    """Minimal websocket client for a Petals-style inference API.

    Usage: open_session() once, then iterate generate() to stream output
    chunks; call close_session() when done.
    """

    def __init__(self, endpoint_url):
        """Remember the endpoint; no connection is made until open_session().

        Args:
            endpoint_url: websocket URL, e.g. "ws://host:port/api/v2/generate".
        """
        self.endpoint_url = endpoint_url
        self.ws = None      # active websocket connection, or None
        self.model = None   # model name of the open session, or None

    def open_session(self, model, max_length):
        """Connect and open an inference session for *model*.

        Args:
            model: model identifier sent to the server (also used by
                generate() to pick a stop sequence).
            max_length: maximum session length, forwarded to the server.

        Raises:
            RuntimeError: if the server does not acknowledge the session.
        """
        self.ws = websocket.create_connection(self.endpoint_url)
        self.model = model
        payload = {
                "type": "open_inference_session",
                "model": self.model,
                "max_length": max_length,
            }
        self.ws.send(json.dumps(payload))
        response = json.loads(self.ws.recv())
        if not response.get('ok'):
            # Don't leak the socket on a rejected session, and raise a real
            # exception instead of `assert` (which is stripped under -O).
            self.close_session()
            raise RuntimeError(
                response.get('traceback', 'failed to open inference session'))

    def close_session(self):
        """Close the websocket if open. Safe to call repeatedly."""
        if self.ws:
            self.ws.close()
            # Reset so a closed connection can't be accidentally reused.
            self.ws = None

    def generate(self, prompt, **kwargs):
        """Yield output text chunks for *prompt* until the server stops.

        Defaults request greedy decoding one token at a time; any keyword
        argument (do_sample, temperature, top_p, ...) overrides the default.

        Raises:
            Exception: with the server-side traceback if a chunk reports
                an error.
            json.decoder.JSONDecodeError: if the server sends invalid JSON
                (the session is closed first).
        """
        payload = {
                "type": "generate",
                "inputs": prompt,
                "max_new_tokens": 1,
                "do_sample": 0,
                "temperature": 0,
                # bloomz variants end turns with </s>; other models use a
                # blank line.
                "stop_sequence": "</s>" if "bloomz" in self.model else "\n\n",
            }
        payload = {**payload, **kwargs}
        self.ws.send(json.dumps(payload))

        while True:
            try:
                data = json.loads(self.ws.recv())
            except json.decoder.JSONDecodeError:
                self.close_session()
                raise

            if not data['ok']:
                raise Exception(data['traceback'])
            yield data['outputs']
            if data['stop']:
                break
 
def main():
    """Stream a completion for a prompt (argv[1] or a built-in default)."""
    client = ModelClient("ws://localhost:8000/api/v2/generate")
    # client = ModelClient("ws://chat.petals.ml/api/v2/generate")
    client.open_session("bigscience/bloom-petals", 128)

    try:
        if len(sys.argv) > 1:
            prompt = sys.argv[1]
            # This model stops on a blank line (bloomz variants use </s>
            # instead), so make sure the prompt ends with one.
            if not prompt.endswith("\n\n"):
                prompt += "\n\n"
        else:
            prompt = "The SQL command to extract all the users whose name starts with A is: \n\n"
            print(f"Prompt: {prompt}")

        # May raise e.g. petals.client.routing.sequence_manager.MissingBlocksError
        for out in client.generate(prompt,
                                do_sample=True,
                                temperature=0.75,
                                top_p=0.9):
            print(out, end="", flush=True)
    finally:
        # Always release the websocket, even if generation fails mid-stream.
        client.close_session()

if __name__ == '__main__':
    main()