Imageye commited on
Commit
f5db02a
·
verified ·
1 Parent(s): 053db8b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -23
app.py CHANGED
@@ -4,22 +4,23 @@ import re
4
  import tempfile
5
  import os
6
  import warnings
7
- from groq import GroqClient
8
  from transformers import pipeline
 
9
 
10
  # Suppress specific warning
11
  warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")
12
 
13
- # Set up Groq client
14
- client = GroqClient(api_key=os.environ.get("GROQ_API_KEY"))
15
 
16
- # Instantiate Hugging Face pipeline
17
- transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2")
18
 
19
- # Function to transcribe audio using Hugging Face Transformers
20
  def transcribe_audio(file_path):
21
- result = transcriber(file_path)
22
- text = result["text"]
23
  return text
24
 
25
  # Function to get transcript from YouTube
@@ -35,19 +36,30 @@ def get_transcript(url):
35
  except Exception as e:
36
  return str(e)
37
 
 
 
 
 
 
 
 
 
 
 
38
  # Function to summarize text using Groq API
39
  def summarize_text(text):
40
  try:
41
- response = client.completions.create(
42
- model="llama3-8b-8192",
43
- messages=[
44
  {
45
  "role": "user",
46
  "content": f"Summarize the following text:\n\n{text}"
47
  }
48
  ]
49
- )
50
- summary = response.choices[0].message["content"].strip()
 
51
  return summary
52
  except Exception as e:
53
  return f"Error summarizing text: {e}"
@@ -55,16 +67,17 @@ def summarize_text(text):
55
  # Function to generate quiz questions using Groq API
56
  def generate_quiz_questions(text):
57
  try:
58
- response = client.completions.create(
59
- model="llama3-8b-8192",
60
- messages=[
61
  {
62
  "role": "user",
63
  "content": f"Generate quiz questions for the following text:\n\n{text}"
64
  }
65
  ]
66
- )
67
- quiz_questions = response.choices[0].message["content"].strip()
 
68
  return quiz_questions
69
  except Exception as e:
70
  return f"Error generating quiz questions: {e}"
@@ -106,16 +119,17 @@ def parse_quiz_questions(quiz_text):
106
  # Function to generate explanation for quiz answers using Groq API
107
  def generate_explanation(question, correct_answer, user_answer):
108
  try:
109
- response = client.completions.create(
110
- model="llama3-8b-8192",
111
- messages=[
112
  {
113
  "role": "user",
114
  "content": f"Explain why the correct answer to the following question is '{correct_answer}' and not '{user_answer}':\n\n{question}"
115
  }
116
  ]
117
- )
118
- explanation = response.choices[0].message["content"].strip()
 
119
  return explanation
120
  except Exception as e:
121
  return f"Error generating explanation: {e}"
 
4
  import tempfile
5
  import os
6
  import warnings
7
+ import requests
8
  from transformers import pipeline
9
+ from whisper_jax import FlaxWhisperPipeline
10
 
11
  # Suppress specific warning
12
  warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")
13
 
14
+ # Set up Groq API key
15
+ GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
16
 
17
+ # Instantiate FlaxWhisperPipeline
18
+ pipeline = FlaxWhisperPipeline("openai/whisper-large-v2")
19
 
20
# Function to transcribe audio using FlaxWhisperPipeline
def transcribe_audio(file_path):
    """Transcribe the audio file at *file_path* and return the transcript text.

    The first call JIT-compiles the model's forward pass (slow); subsequent
    calls reuse the compiled function.

    Args:
        file_path: path to an audio file the pipeline can read.

    Returns:
        The transcription as a plain string.
    """
    # whisper-jax's pipeline returns a dict payload; the pre-refactor code
    # returned result["text"], so callers expect a plain string, but this
    # rewrite was returning the raw dict. Unwrap it, defensively keeping
    # backward compatibility if the pipeline ever returns a bare string.
    result = pipeline(file_path)
    if isinstance(result, dict):
        return result["text"]
    return result
25
 
26
  # Function to get transcript from YouTube
 
36
  except Exception as e:
37
  return str(e)
38
 
39
# Function to make a request to the Groq API
def groq_request(payload):
    """POST a chat-completion payload to the Groq API and return the parsed JSON.

    Args:
        payload: dict request body ("model", "messages", ...), as built by the
            summarize/quiz/explanation helpers in this module.

    Returns:
        The decoded JSON response as a dict.

    Raises:
        requests.HTTPError: if the API responds with a 4xx/5xx status.
        requests.Timeout: if the API does not respond within the timeout.
    """
    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json",
    }
    # Fix: "https://api.groq.com/v1/endpoint" is a placeholder, not a real
    # route. The payloads in this file use the OpenAI-compatible
    # model/messages schema, which Groq serves at /openai/v1/chat/completions.
    response = requests.post(
        "https://api.groq.com/openai/v1/chat/completions",
        json=payload,
        headers=headers,
        timeout=60,  # requests has no default timeout; avoid hanging forever
    )
    response.raise_for_status()
    return response.json()
48
+
49
# Function to summarize text using Groq API
def summarize_text(text):
    """Ask the Groq chat model for a summary of *text*.

    Returns:
        The summary string on success, or a human-readable error message
        string on failure (this function never raises).
    """
    request_body = {
        "model": "llama3-8b-8192",
        "messages": [
            {"role": "user", "content": f"Summarize the following text:\n\n{text}"},
        ],
    }
    try:
        reply = groq_request(request_body)
        return reply['choices'][0]['message']['content'].strip()
    except Exception as e:
        # Best-effort API call: surface the failure to the caller as text.
        return f"Error summarizing text: {e}"
 
67
  # Function to generate quiz questions using Groq API
68
  def generate_quiz_questions(text):
69
  try:
70
+ payload = {
71
+ "model": "llama3-8b-8192",
72
+ "messages": [
73
  {
74
  "role": "user",
75
  "content": f"Generate quiz questions for the following text:\n\n{text}"
76
  }
77
  ]
78
+ }
79
+ response = groq_request(payload)
80
+ quiz_questions = response['choices'][0]['message']['content'].strip()
81
  return quiz_questions
82
  except Exception as e:
83
  return f"Error generating quiz questions: {e}"
 
119
# Function to generate explanation for quiz answers using Groq API
def generate_explanation(question, correct_answer, user_answer):
    """Ask the Groq chat model to explain a quiz answer.

    Args:
        question: the quiz question text.
        correct_answer: the expected answer.
        user_answer: the answer the user actually gave.

    Returns:
        The explanation string on success, or a human-readable error
        message string on failure (this function never raises).
    """
    prompt = (
        f"Explain why the correct answer to the following question is "
        f"'{correct_answer}' and not '{user_answer}':\n\n{question}"
    )
    request_body = {
        "model": "llama3-8b-8192",
        "messages": [{"role": "user", "content": prompt}],
    }
    try:
        reply = groq_request(request_body)
        return reply['choices'][0]['message']['content'].strip()
    except Exception as e:
        # Best-effort API call: surface the failure to the caller as text.
        return f"Error generating explanation: {e}"