|
|
|
|
|
import requests |
|
import streamlit as st |
|
import openai |
|
from openai import OpenAI |
|
import os |
|
from dotenv import load_dotenv |
|
import numpy as np |
|
import pandas as pd |
|
import csv |
|
import tempfile |
|
from tempfile import NamedTemporaryFile |
|
import pathlib |
|
from pathlib import Path |
|
import re |
|
from re import sub |
|
import matplotlib.pyplot as plt |
|
from itertools import product |
|
from tqdm import tqdm_notebook, tqdm, trange |
|
import time |
|
from time import sleep |
|
import pretty_errors |
|
import seaborn as sns |
|
from matplotlib.pyplot import style |
|
from rich import print |
|
import warnings |
|
# Silence library warnings for cleaner console output.
warnings.filterwarnings('ignore')

# Pull variables from a local .env file into the process environment.
load_dotenv()

# The user's token doubles as the OpenAI API key: expose it under the env
# name the OpenAI SDK reads, and also set the legacy module-level attribute.
# NOTE(review): raises KeyError if 'user_token' is absent — same as before.
api_token = os.environ['user_token']
os.environ["OPENAI_API_KEY"] = api_token
openai.api_key = api_token
|
|
|
|
|
def chatgpt(user_prompt, sys_prompt="You are professional consultant", openai_model="gpt-3.5-turbo-16k"):
    """Send one chat-completion request to the OpenAI API and return the reply.

    Args:
        user_prompt: The user's question or instruction.
        sys_prompt: System message framing the assistant's persona.
        openai_model: Name of the OpenAI chat model to query.

    Returns:
        str: The model's reply text, or the fixed apology string when any
        fallible step (client construction, network call, response parsing)
        raises.
    """
    try:
        # Client construction can itself raise (e.g. missing API key),
        # so it stays inside the try along with the network call.
        openai_client = OpenAI()
        chatgpt_response = openai_client.chat.completions.create(
            model=openai_model,
            messages=[
                {"role": "system", "content": sys_prompt},
                {"role": "user", "content": user_prompt},
            ],
            stream=False,
        )
        full_response = chatgpt_response.choices[0].message.content
    except Exception as e:
        # Best-effort fallback: log the error but never let the caller crash
        # on an API failure.
        print(e)
        full_response = "Sorry, I don't have an answer for that yet."
    else:
        # Success-path echo lives OUTSIDE the try: a print failure must not
        # discard a reply that was already received.
        print(full_response)
    return full_response
|
|
|
|
|
|
|
|
|
|