import os
from typing import Optional

from langchain.memory import ConversationBufferMemory  # imported but not currently wired into the agent
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.agents import AgentType, initialize_agent, Tool
from lang import G4F  # project module exposing the G4F (gpt4free) LLM wrapper for LangChain
from fastapi import FastAPI, Request
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from ImageCreator import generate_image_prodia  # project helper that calls the Prodia image API

app = FastAPI()

# Open CORS policy: allow requests from any origin with any method and headers
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# GoogleSearchAPIWrapper reads GOOGLE_API_KEY and GOOGLE_CSE_ID from the
# environment on its own; the lookups below also raise a clear KeyError at
# startup if any required variable is missing.
google_api_key = os.environ["GOOGLE_API_KEY"]
cse_id = os.environ["GOOGLE_CSE_ID"]
model = os.environ["default_model"]
search = GoogleSearchAPIWrapper()

tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful when you need to answer questions about current events",
    ),
]
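
# Build a zero-shot ReAct agent that can call the Search tool when needed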
llm = G4F(model=model)
agent_chain = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
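
# Health-check / greeting endpoint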
@app.get("/")
def hello():
    return "Hello! My name is Linlada."
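
# Chat endpoint: sends the raw prompt straight to the G4F LLM and returns its reply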
@app.post('/linlada')
async def hello_post(request: Request):
    llm = G4F(model=model)  # fresh LLM instance per request (shadows the module-level llm)
    data = await request.json()
    prompt = data['prompt']
    chat = llm(prompt)
    return chat
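
# Search endpoint: routes the prompt through the ReAct agent, which may invoke Google Search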
@app.post('/search')
async def searches(request: Request):
    data = await request.json()
    prompt = data['prompt']
    response = agent_chain.run(input=prompt)
    return response
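
# Request body schema shared by the image and test endpoints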
class User(BaseModel):
    prompt: str
    model: str
    sampler: str
    seed: int
    neg: Optional[str] = None  # negative prompt is optional
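
# Image generation endpoint backed by Prodia via the ImageCreator helper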
@app.post("/imagen")
def generate_image(request: User):
    prompt = request.prompt
    model = request.model
    sampler = request.sampler
    seed = request.seed
    neg = request.neg
    response = generate_image_prodia(prompt, model, sampler, seed, neg)
    return {"image": response}
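
# Echo endpoint for quickly verifying request-body parsing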
@app.post("/test")
def test(request: User):
    return {'data': f'Prompt is {request.prompt} Model is {request.model}'}