engrphoenix committed on
Commit
683c54c
·
verified ·
1 Parent(s): 0ce2567

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -8
app.py CHANGED
@@ -38,10 +38,10 @@ def compute_embeddings(text_chunks):
38
  messages=[{"role": "user", "content": chunk}],
39
  model="llama3-70b-8192"
40
  )
41
- # Convert response to NumPy array
42
- embedding_str = response['choices'][0]['message']['content']
43
- embedding = np.fromstring(embedding_str, sep=",")
44
- embeddings.append(embedding)
45
  return np.array(embeddings)
46
 
47
  # Function to build FAISS index
@@ -62,7 +62,8 @@ def generate_professional_content_groq(topic):
62
  messages=[{"role": "user", "content": f"Explain '{topic}' in bullet points, highlighting key concepts, examples, and applications for electrical engineering students."}],
63
  model="llama3-70b-8192"
64
  )
65
- return response['choices'][0]['message']['content'].strip()
 
66
 
67
  # Function to compute query embedding using Groq's Llama3-70B-8192 model
68
  def compute_query_embedding(query):
@@ -70,9 +71,9 @@ def compute_query_embedding(query):
70
  messages=[{"role": "user", "content": query}],
71
  model="llama3-70b-8192"
72
  )
73
- # Convert to NumPy array
74
- embedding_str = response['choices'][0]['message']['content']
75
- return np.fromstring(embedding_str, sep=",").reshape(1, -1)
76
 
77
  # Streamlit app
78
  st.title("Generative AI for Electrical Engineering Education with FAISS and Groq")
 
38
  messages=[{"role": "user", "content": chunk}],
39
  model="llama3-70b-8192"
40
  )
41
+ # Access the embedding content from the response
42
+ embedding = response.choices[0].message.content
43
+ embedding_array = np.fromstring(embedding, sep=",") # Convert string to NumPy array
44
+ embeddings.append(embedding_array)
45
  return np.array(embeddings)
46
 
47
  # Function to build FAISS index
 
62
  messages=[{"role": "user", "content": f"Explain '{topic}' in bullet points, highlighting key concepts, examples, and applications for electrical engineering students."}],
63
  model="llama3-70b-8192"
64
  )
65
+ # Access content from the response
66
+ return response.choices[0].message.content.strip()
67
 
68
  # Function to compute query embedding using Groq's Llama3-70B-8192 model
69
  def compute_query_embedding(query):
 
71
  messages=[{"role": "user", "content": query}],
72
  model="llama3-70b-8192"
73
  )
74
+ # Access embedding content and convert it to a NumPy array
75
+ embedding = response.choices[0].message.content
76
+ return np.fromstring(embedding, sep=",").reshape(1, -1)
77
 
78
  # Streamlit app
79
  st.title("Generative AI for Electrical Engineering Education with FAISS and Groq")