Jangai committed
Commit b3770ab · verified · 1 Parent(s): d2271e1

Update app.py

Files changed (1): app.py (+15 -16)
app.py CHANGED
@@ -17,32 +17,31 @@ SD_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-dif
 def query_zephyr(linkedin_text):
     headers = {
         "Authorization": f"Bearer {ZEPHYR_API_TOKEN}",
-        "Content-Type": "application/json",
     }
 
-    # Adapting the conversational format from the pipeline example
-    prompt = "Prepare a prompt for Stable Diffusion for the following LinkedIn post:"
-    messages = [
-        {"role": "system", "content": prompt},
-        {"role": "user", "content": linkedin_text}
-    ]
-
-    # Structuring the payload for a conversational input
+    # Format a plain text-generation request; adjust the generation
+    # parameters (temperature, max_new_tokens, ...) as needed for your
+    # requirements
     payload = {
-        "inputs": {
-            "past_user_inputs": [],
-            "generated_responses": [],
-            "text": linkedin_text,  # or possibly serialize the messages list if needed
-            "conversation": messages,  # This might need to be adjusted based on Zephyr's API requirements
+        "inputs": linkedin_text,
+        "parameters": {
+            "temperature": 0.7,
+            "max_new_tokens": 50,  # Control how long the generated response should be
+            # Add other parameters as needed
+        },
+        "options": {
+            "use_cache": False,  # Do not serve a cached result
+            "wait_for_model": True,  # Wait for the model to be ready if it's currently loading
         }
     }
-    # Sending the HTTP POST request
+
     response = requests.post(ZEPHYR_API_URL, headers=headers, json=payload)
     if response.status_code == 200:
         return response.json()
     else:
+        print(f"Failed to query Zephyr model, status code: {response.status_code}")
         print(response.text)  # To get more insight into what went wrong
-        raise Exception(f"Failed to query Zephyr model, status code: {response.status_code}")
+        return None
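For context, a minimal sketch of how the updated function might be wired into the rest of app.py. The response shape assumed for the Zephyr call, [{"generated_text": "..."}], is the usual Inference API output for text-generation models, and query_stable_diffusion is a hypothetical helper for the SD_API_URL endpoint visible in the hunk header; neither is part of this commit.

import requests

def extract_prompt(zephyr_response):
    # Text-generation endpoints usually return a list of candidates;
    # fall back to None if the shape is unexpected or the query failed.
    if isinstance(zephyr_response, list) and zephyr_response:
        return zephyr_response[0].get("generated_text")
    return None

def query_stable_diffusion(prompt):
    # Hypothetical companion call; assumes the same token is valid for the
    # Stable Diffusion endpoint. Image endpoints return raw image bytes.
    headers = {"Authorization": f"Bearer {ZEPHYR_API_TOKEN}"}
    response = requests.post(SD_API_URL, headers=headers, json={"inputs": prompt})
    return response.content if response.status_code == 200 else None

result = query_zephyr("Excited to share that our team shipped v2.0 today!")
prompt = extract_prompt(result)
if prompt:
    image_bytes = query_stable_diffusion(prompt)

Since query_zephyr now returns None instead of raising on failure, callers should check the result before passing it along, as the sketch does.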