saranbalan committed on
Commit
c49076d
·
verified ·
1 Parent(s): 353c901

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -14
app.py CHANGED
@@ -31,25 +31,21 @@ except Exception as e:
31
  # Set device: CUDA if available, else CPU
32
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
33
 
34
  # (removed in this commit) hard-coded Stable Diffusion checkpoint id;
  # the new hunk replaces it with a value read from the API_KEY env var.
- model_id1 = "dreamlike-art/dreamlike-diffusion-1.0"
 
35
  # Loads the SD pipeline in fp16 with safetensors weights; model_id1 must
  # be a valid Hub model identifier for this call to succeed.
  pipe = StableDiffusionPipeline.from_pretrained(model_id1, torch_dtype=torch.float16, use_safetensors=True)
36
  pipe = pipe.to(device)
37
 
38
  # Updated function for text generation using the new API structure
39
  # (removed in this commit) OpenAI-based implementation.
  # NOTE(review): openai.ChatCompletion.create is the pre-1.0 openai-python
  # API surface; it was removed in openai>=1.0 in favor of
  # client.chat.completions.create — presumably why this commit replaces it.
  def generate_creative_text(prompt):
40
- try:
41
- response = openai.ChatCompletion.create(
42
- model="gpt-3.5-turbo", # Change this to the model you prefer, e.g., "gpt-4" if available
43
- messages=[
44
- {"role": "system", "content": "You are a creative assistant."},
45
- {"role": "user", "content": prompt}
46
- ],
47
- max_tokens=1024,
48
- temperature=0.7,
49
- )
50
  # On success: returns the first choice's message text, stripped.
- return response['choices'][0]['message']['content'].strip()
51
  # On any failure: swallows the exception and returns an error string
  # instead of raising — callers receive a str either way.
- except Exception as e:
52
- return f"An error occurred during text generation: {str(e)}"
53
 
54
 
55
  def process_audio(audio_path, image_option, creative_text_option):
 
31
  # Set device: CUDA if available, else CPU
32
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
33
 
34
+ model_id1 = os.getenv("API_KEY")
35
+ # model_id1 = "dreamlike-art/dreamlike-diffusion-1.0"
36
  pipe = StableDiffusionPipeline.from_pretrained(model_id1, torch_dtype=torch.float16, use_safetensors=True)
37
  pipe = pipe.to(device)
38
 
39
  # Updated function for text generation using the new API structure
40
def generate_creative_text(prompt):
    """Generate creative text for *prompt* via the Groq chat API.

    Sends a single user message to the ``llama-3.2-90b-text-preview``
    model through the module-level ``client`` and returns the first
    choice's message content.

    Parameters
    ----------
    prompt : str
        The user prompt to send to the chat model.

    Returns
    -------
    str
        The model's reply, or — matching the error contract of the
        previous implementation — an error string when the API call
        fails, so the Gradio handler never sees a raised exception.
    """
    try:
        chat_completion = client.chat.completions.create(
            messages=[
                {"role": "user", "content": prompt}
            ],
            model="llama-3.2-90b-text-preview"
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        # Previous version returned an error string rather than raising;
        # restore that behavior so UI callbacks degrade gracefully.
        return f"An error occurred during text generation: {str(e)}"
 
 
 
 
 
49
 
50
 
51
  def process_audio(audio_path, image_option, creative_text_option):