import os
from typing import Optional
from langchain.memory import ConversationBufferMemory
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.agents import AgentType, initialize_agent, Tool
from lang import G4F
from fastapi import FastAPI, Request
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from ImageCreator import generate_image_prodia
app = FastAPI()
app.add_middleware(  # add the CORS middleware
    CORSMiddleware,
    allow_credentials=True,  # allow credentials
    allow_origins=["*"],     # allow all origins
    allow_methods=["*"],     # allow all methods
    allow_headers=["*"],     # allow all headers
)
# GoogleSearchAPIWrapper reads GOOGLE_API_KEY and GOOGLE_CSE_ID from the environment.
google_api_key = os.environ["GOOGLE_API_KEY"]
cse_id = os.environ["GOOGLE_CSE_ID"]
model = os.environ["default_model"]
search = GoogleSearchAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful when you need to answer questions about current events",
    ),
]
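# Wrap the G4F-backed LLM in a zero-shot ReAct agent so it can decide when to
# call the Search tool while answering a prompt.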
llm = G4F(model=model)
agent_chain = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
@app.get("/")
def hello():
    return "Hello! My name is Linlada."
@app.post('/linlada')
async def hello_post(request: Request):
    llm = G4F(model=model)  # fresh G4F client per request
    data = await request.json()
    prompt = data['prompt']
    chat = llm(prompt)
    return chat
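# Example request (a sketch; assumes the app is served locally on port 8000):
#   curl -X POST http://127.0.0.1:8000/linlada \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello, who are you?"}'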
@app.post('/search')
async def searches(request: Request):
    data = await request.json()
    prompt = data['prompt']
    response = agent_chain.run(input=prompt)
    return response
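# Example request against the search agent (same local-server assumption as above):
#   curl -X POST http://127.0.0.1:8000/search \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "What happened in the news today?"}'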
# @app.post("/imagen")
# async def generate_image(request: Request):
#     data = await request.json()
#     prompt = data['prompt']
#     model = data.get("model")
#     sampler = data.get("sampler")
#     seed = int(data.get("seed"))
#     neg = data.get("neg")
#     response = generate_image_prodia(prompt, model, sampler, seed, neg)
#     return {"image": response}
class User(BaseModel):
    prompt: str
    model: str
    sampler: str
    seed: int
    neg: Optional[str] = None
tests = {}
@app.post("/test")
def test(request: User):
return {'data': f'Prompt is {request.prompt}'}
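# To serve the API (a sketch; assumes this file is named main.py and uvicorn is installed):
#   uvicorn main:app --host 0.0.0.0 --port 8000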