Jangai committed on
Commit
53732b8
·
verified ·
1 Parent(s): b3770ab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -16
app.py CHANGED
@@ -14,25 +14,20 @@ if not ZEPHYR_API_TOKEN or not SD_API_TOKEN:
14
  ZEPHYR_API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
15
  SD_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
16
 
17
- def query_zephyr(linkedin_text):
18
  headers = {
19
  "Authorization": f"Bearer {ZEPHYR_API_TOKEN}",
 
20
  }
21
 
22
- # Assuming you're formatting a request that includes conversational context
23
- # and possibly leveraging detailed parameters like temperature and max_new_tokens
24
- # Adjust these parameters as needed based on your requirements
 
 
 
 
25
  payload = {
26
- "inputs": linkedin_text,
27
- "parameters": {
28
- "temperature": 0.7,
29
- "max_new_tokens": 50, # Control how long the generated response should be
30
- # Add other parameters as needed
31
- },
32
- "options": {
33
- "use_cache": False, # Set to False to not use the cache
34
- "wait_for_model": True, # Wait for the model to be ready if it's currently loading
35
- }
36
  }
37
 
38
  response = requests.post(ZEPHYR_API_URL, headers=headers, json=payload)
@@ -40,9 +35,16 @@ def query_zephyr(linkedin_text):
40
  return response.json()
41
  else:
42
  print(f"Failed to query Zephyr model, status code: {response.status_code}")
43
- print(response.text) # To get more insight into what went wrong
44
- return Noneto query Zephyr model, status code: {response.status_code}")
45
 
 
 
 
 
 
 
 
46
 
47
 
48
  def generate_image_from_prompt(prompt, negative_prompt, guidance_scale, width, height, num_inference_steps):
 
14
  ZEPHYR_API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
15
  SD_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
16
 
 
17
  headers = {
18
  "Authorization": f"Bearer {ZEPHYR_API_TOKEN}",
19
+ "Content-Type": "application/json",
20
  }
21
 
22
+ # Assuming Zephyr supports a similar conversational structure
23
+ chat = [
24
+ {"role": "system", "content": "Prepare a prompt for Stable Diffusion for the following LinkedIn post:"},
25
+ {"role": "user", "content": linkedin_text},
26
+ # You can add more turns here if necessary
27
+ ]
28
+
29
  payload = {
30
+ "inputs": chat,
 
 
 
 
 
 
 
 
 
31
  }
32
 
33
  response = requests.post(ZEPHYR_API_URL, headers=headers, json=payload)
 
35
  return response.json()
36
  else:
37
  print(f"Failed to query Zephyr model, status code: {response.status_code}")
38
+ print(response.text) # Provides insight into what went wrong
39
+ return None
40
 
41
+ # Example LinkedIn post text
42
+ linkedin_text = "Example LinkedIn post content here. How should this be visualized?"
43
+ zephyr_response = query_zephyr_system_user_format(linkedin_text)
44
+ if zephyr_response:
45
+ print(zephyr_response)
46
+ else:
47
+ print("Error querying the Zephyr model.")
48
 
49
 
50
  def generate_image_from_prompt(prompt, negative_prompt, guidance_scale, width, height, num_inference_steps):