asv7j committed on
Commit
d42d1b9
·
verified ·
1 Parent(s): 15a6b6b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -6,6 +6,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
6
  device = "cpu"
7
 
8
  access_token = os.getenv("access_token")
 
9
 
10
  tokenizer1 = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
11
  tokenizer2 = AutoTokenizer.from_pretrained("google/gemma-2-2b-it", token=access_token)
@@ -35,7 +36,7 @@ app = FastAPI()
35
  async def read_root():
36
  return {"Hello": "World!"}
37
 
38
- def modelResp1(id, prompt):
39
  messages = [
40
  {"role": "system", "content": "You are a helpful assistant, Sia, developed by Sushma. You will response in polity and brief."},
41
  {"role": "user", "content": "Who are you?"},
@@ -53,10 +54,11 @@ def modelResp1(id, prompt):
53
  echo=False, # Whether to echo the prompt
54
  )
55
  response = output['choices'][0]['text']
56
-
57
- privatepayload['target'] = f"{id}"
58
- privatepayload['content'] = response
59
- requests.post(privateurl, headers=headers, data=privatepayload)
 
60
 
61
  def modelResp2(prompt):
62
  messages = [
@@ -103,7 +105,7 @@ def modelResp3(prompt):
103
  async def modelApi(data: dict):
104
  id = data.get("target_id")
105
  prompt = data.get("prompt")
106
- modelResp1(id, prompt)
107
 
108
  @app.post("/modelapi2")
109
  async def modelApi(data: dict):
@@ -123,7 +125,7 @@ headers = {
123
  'Accept-Encoding': 'gzip, deflate, br',
124
  'Accept-Language': 'en-US,en;q=0.9',
125
  'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
126
- 'Cookie': 'PHPSESSID=g0r2los456eaalf7epddmehabi; bc_userid=52113; bc_utk=aef8ec9314d8b64477cd86c80591f7e0c90f3faa',
127
  'Sec-Ch-Ua': '"Opera";v="95", "Chromium";v="109", "Not;A=Brand";v="24"',
128
  'Sec-Ch-Ua-Mobile': '?0',
129
  'Sec-Ch-Ua-Platform': '"Windows"',
 
6
  device = "cpu"
7
 
8
  access_token = os.getenv("access_token")
9
+ privateurl = os.getenv("privateurl")
10
 
11
  tokenizer1 = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
12
  tokenizer2 = AutoTokenizer.from_pretrained("google/gemma-2-2b-it", token=access_token)
 
36
  async def read_root():
37
  return {"Hello": "World!"}
38
 
39
+ def modelResp1(cookie, id, token, prompt):
40
  messages = [
41
  {"role": "system", "content": "You are a helpful assistant, Sia, developed by Sushma. You will response in polity and brief."},
42
  {"role": "user", "content": "Who are you?"},
 
54
  echo=False, # Whether to echo the prompt
55
  )
56
  response = output['choices'][0]['text']
57
+ headers['Cookie'] = f"{cookie}"
58
+ payload['token'] = f"{token}"
59
+ payload['target'] = f"{id}"
60
+ payload['content'] = response
61
+ requests.post(privateurl, headers=headers, data=payload)
62
 
63
  def modelResp2(prompt):
64
  messages = [
 
105
  async def modelApi(data: dict):
106
  id = data.get("target_id")
107
  prompt = data.get("prompt")
108
+ modelResp1(cookie, id, token, prompt)
109
 
110
  @app.post("/modelapi2")
111
  async def modelApi(data: dict):
 
125
  'Accept-Encoding': 'gzip, deflate, br',
126
  'Accept-Language': 'en-US,en;q=0.9',
127
  'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
128
+ 'Cookie': '',
129
  'Sec-Ch-Ua': '"Opera";v="95", "Chromium";v="109", "Not;A=Brand";v="24"',
130
  'Sec-Ch-Ua-Mobile': '?0',
131
  'Sec-Ch-Ua-Platform': '"Windows"',