from io import BytesIO
import os

from langchain_openai import ChatOpenAI
from openai import OpenAI
import tiktoken


def n_of_questions():
    """Return the number of interview questions to ask."""
    return 25


# Configuration: the API key is read from the environment; the same model
# name is used for the LangChain chat model below.
openai_api_key = os.environ.get("openai_api_key")

model = "gpt-4o-mini"


def load_model(openai_api_key):
    """Build the LangChain chat model used for questioning and reporting."""
    return ChatOpenAI(
        model_name=model,
        openai_api_key=openai_api_key,
        temperature=0.1,
        top_p=0.85,
    )
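
# Usage sketch (illustrative only, not executed at import time; the prompt
# text below is an assumption, not part of this module):
#   llm = load_model(openai_api_key)
#   reply = llm.invoke("Summarize the patient's answers in two sentences.")
#   print(reply.content)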


# OpenAI client used for the text-to-speech requests below.
client = OpenAI(api_key=openai_api_key)


def convert_text_to_speech(text, output):
    """Convert text to speech with OpenAI TTS and write the audio to `output`.

    `output` may be a BytesIO buffer (audio is kept in memory) or a file path.
    """
    try:
        response = client.audio.speech.create(model="tts-1", voice="alloy", input=text)

        if isinstance(output, BytesIO):
            # Stream the audio bytes into the in-memory buffer.
            for chunk in response.iter_bytes():
                output.write(chunk)
        else:
            # Otherwise treat `output` as a file path and write the audio to disk.
            with open(output, 'wb') as f:
                for chunk in response.iter_bytes():
                    f.write(chunk)

    except Exception as e:
        print(f"An error occurred: {e}")


if __name__ == "__main__":
    # Quick manual check: synthesize a short phrase and write it to a local
    # file (the filename here is illustrative).
    convert_text_to_speech("Here is my Report!", "report.mp3")