import inspect
from typing import List, Type

import gradio as gr
import requests
from gradio import routes
from petals import AutoDistributedModelForCausalLM
from transformers import AutoTokenizer

import npc_data  # NPC persona data (intended for the prompt-building step in chat)

# Monkeypatch gradio.routes.get_types: derive input/output types from the
# component docstrings so the auto-generated API docs work on this Gradio version.
def get_types(cls_set: List[Type], component: str):
    docset = []
    types = []
    if component == "input":
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[1].split(":")[-1])
            types.append(doc_lines[1].split(")")[0].split("(")[-1])
    else:
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[-1].split(":")[-1])
            types.append(doc_lines[-1].split(")")[0].split("(")[-1])
    return docset, types
routes.get_types = get_types
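# Note: the parser above assumes docstrings whose second (input) or last
# (output) line looks roughly like "... (str): description", taking the text
# inside the parentheses as the type and the text after the colon as the
# description. The exact wording depends on the installed Gradio version.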

# App code

model_name = "daekeun-ml/Llama-2-ko-instruct-13B"

# Candidate Korean Llama-2 models on the Petals swarm:
#   daekeun-ml/Llama-2-ko-instruct-13B
#   quantumaikr/llama-2-70b-fb16-korean
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = None

def init():
    """Connect to the model on the Petals swarm if it is reported healthy."""
    global model
    if check(model_name):
        model = AutoDistributedModelForCausalLM.from_pretrained(model_name)


def check(model_name):
    """Return True if the Petals health monitor reports the model as healthy."""
    data = requests.get("https://health.petals.dev/api/v1/state").json()
    for d in data["model_reports"]:
        if d["name"] == model_name and d["state"] == "healthy":
            return True
    return False
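# Shape of the health endpoint response assumed by check() above (abridged sketch):
#   {"model_reports": [{"name": "<model>", "state": "healthy", ...}, ...]}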


def chat(id, npc, prompt):
    # Lazy init: connect to the swarm on first use; bail out if it fails.
    if model is None:
        init()
        if model is None:
            return "no model"

    # get_coin endpoint: refuse to run inference if the user has no coins left
    response = requests.post("https://ldhldh-api-for-unity.hf.space/run/predict_6", json={
      "data": [
        id,
    ]}).json()

    coin = response["data"][0]
    if int(coin) == 0:
        return "no coin"

    # model inference
    if check(model_name):
        # Prompt template: mirrors the "### input / ### output" format recorded
        # in the transaction below. The npc argument (see npc_data) could be
        # prepended here as persona context; the original left the prompt empty.
        prom = "### input:\n" + prompt + "\n\n### output:\n"
        inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
        outputs = model.generate(inputs, max_new_tokens=100)
        output = tokenizer.decode(outputs[0])
        print(output)
    else:
        output = "no model"

    # add_transaction endpoint: log the prompt/output pair for this user
    response = requests.post("https://ldhldh-api-for-unity.hf.space/run/predict_5", json={
      "data": [
        id,
        "inference",
        "### input:\n" + prompt + "\n\n### output:\n" + output
    ]}).json()

    return output


with gr.Blocks() as demo:
    aa = gr.Interface(
      fn=chat,
      inputs=["text", "text", "text"],
      outputs="text",
      description="chat: returns the AI response and records a transaction internally. \n /run/predict",
    )

demo.queue(max_size=32).launch()
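# --- Usage sketch (commented out; not executed by the Space) ------------------
# A minimal client-side call to this app's /run/predict endpoint, assuming the
# Space is served at the hypothetical URL below:
#
#   import requests
#   r = requests.post("https://<owner>-<space>.hf.space/run/predict", json={
#       "data": ["player-id", "npc-name", "hello there"]
#   }).json()
#   print(r["data"][0])  # chat() result: generated text, "no coin", or "no model"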