import os
from langchain.memory import ConversationBufferMemory
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.agents import AgentType, initialize_agent, Tool
from lang import G4F
from fastapi import FastAPI, Request
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from ImageCreator import generate_image_prodia
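
# FastAPI service exposing a greeting route, a G4F chat endpoint,
# a Google-search agent endpoint, and a Prodia image-generation endpoint.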
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,  # allow credentials
    allow_origins=["*"],     # allow all origins
    allow_methods=["*"],     # allow all methods
    allow_headers=["*"],     # allow all headers
)
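
# Credentials and the model name come from the environment:
# GOOGLE_API_KEY and GOOGLE_CSE_ID configure GoogleSearchAPIWrapper,
# default_model selects the G4F model.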
google_api_key = os.environ["GOOGLE_API_KEY"]
cse_id = os.environ["GOOGLE_CSE_ID"]
model = os.environ["default_model"]
search = GoogleSearchAPIWrapper()
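
# Wrap Google Search as a LangChain Tool; the description tells the agent
# when the tool is appropriate.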
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful when you need to answer questions about current events",
    ),
]
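
# Instantiate the G4F LLM and build a zero-shot ReAct agent; the agent decides
# from the tool descriptions alone whether to call Search for a given query.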
llm = G4F(model=model)
agent_chain = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
@app.get("/")
def hello():
    return "Hello! My name is Linlada."
@app.post("/linlada")
async def hello_post(request: Request):
    llm = G4F(model=model)
    data = await request.json()
    prompt = data["prompt"]
    chat = llm(prompt)
    return chat
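
# /search: routes the prompt through the search-enabled agent so answers can
# draw on current Google results.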
@app.post("/search")
async def searches(request: Request):
    data = await request.json()
    prompt = data["prompt"]
    response = agent_chain.run(input=prompt)
    return response
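
# POST "/": image generation. Expects "prompt", "model", "sampler", "seed",
# and "neg" in the JSON body and delegates to generate_image_prodia.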
@app.route("/", methods=["POST"])
def generate_image():
data = request.get_json()
prompt = data.get("prompt")
model = data.get("model")
sampler = data.get("sampler")
seed = int(data.get("seed"))
neg = data.get("neg")
response = generate_image_prodia(prompt, model, sampler, seed, neg)
return jsonify({"image": response})
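
# A minimal usage sketch (not part of the original file), assuming this module
# is saved as main.py and served with uvicorn's defaults:
#
#   uvicorn main:app --host 0.0.0.0 --port 8000
#   curl -X POST http://localhost:8000/linlada \
#        -H "Content-Type: application/json" -d '{"prompt": "Hello!"}'
#   curl -X POST http://localhost:8000/search \
#        -H "Content-Type: application/json" -d '{"prompt": "latest FastAPI release"}'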