Jofthomas HF staff committed on
Commit
07631e1
1 Parent(s): 60cd96b

Upload 7 files

Browse files
Files changed (7) hide show
  1. Dockerfile +9 -0
  2. README.md +8 -6
  3. TextGen/ConfigEnv.py +19 -0
  4. TextGen/__init__.py +7 -0
  5. TextGen/router.py +53 -0
  6. app.py +1 -0
  7. requirements.txt +7 -0
Dockerfile ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10.9

# Keep the app out of the filesystem root; uvicorn is started from here.
WORKDIR /app

# Copy the dependency list alone first so the pip layer is cached and only
# re-runs when requirements.txt changes (the original copied the whole
# context before installing, invalidating the cache on every source edit).
COPY requirements.txt .

RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy the application source after dependencies are installed.
COPY . .

# Hugging Face Spaces expects the server on port 7860.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,11 +1,13 @@
1
  ---
2
- title: EvBackend
3
- emoji: 😻
4
- colorFrom: green
5
- colorTo: pink
6
  sdk: docker
 
7
  pinned: false
8
- license: mit
9
  ---
10
 
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
1
  ---
2
+ title: TextGen
3
+ emoji: 📝
4
+ colorFrom: yellow
5
+ colorTo: yellow
6
  sdk: docker
7
+ app_file: app.py
8
  pinned: false
 
9
  ---
10
 
11
+ # FastAPI-Deployment
12
+
13
+ Simple text-generation app using a Cohere generation model, demonstrating how to deploy FastAPI applications on Hugging Face Spaces via Docker and GitHub Actions.
TextGen/ConfigEnv.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Config class for handling env variables.

Loads Clarifai credentials and model identifiers from the process
environment (or a local ``.env`` file) exactly once per process.
"""
from functools import lru_cache
from pydantic import BaseSettings

class Settings(BaseSettings):
    # All fields are required; pydantic raises a ValidationError at startup
    # if any of them is missing from the environment / .env file.
    APP_ID: str            # Clarifai application id
    USER_ID: str           # Clarifai user id
    MODEL_ID: str          # id of the model used for text generation
    CLARIFAI_PAT: str      # Clarifai personal access token (secret)
    MODEL_VERSION_ID: str  # pinned version of the model

    class Config:
        # Fall back to a local .env file for values not set in the environment.
        env_file = '.env'

@lru_cache()
def get_settings():
    """Return a cached Settings instance so the env is only read once."""
    return Settings()

# Module-level singleton imported by the rest of the package.
config = get_settings()
TextGen/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI

# Application instance shared across the package (router.py imports it).
app = FastAPI(title="Deploying FastAPI Apps on Huggingface")

# Imported *after* `app` is created so TextGen.router can do
# `from TextGen import app` without a circular-import error; importing the
# module registers its routes and middleware as a side effect.
from TextGen import router
TextGen/router.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+
3
+ from .ConfigEnv import config
4
+ from fastapi.middleware.cors import CORSMiddleware
5
+
6
+ from langchain.llms import Clarifai
7
+ from langchain.chains import LLMChain
8
+ from langchain.prompts import PromptTemplate
9
+
10
+ from TextGen import app
11
+
12
class Generate(BaseModel):
    """Response schema for the generation endpoint: the generated text."""
    text:str
14
+
15
def generate_text(prompt: str):
    """Run the Clarifai-hosted LLM on ``prompt`` and wrap the completion.

    Returns a ``Generate`` model with the generated text, or a plain
    ``{"detail": ...}`` dict when the prompt is empty (kept for backward
    compatibility with the original error shape).
    """
    if prompt == "":
        return {"detail": "Please provide a prompt."}

    # Bug fix: the original rebound `prompt` to the PromptTemplate object and
    # then passed that object — not the user's text — as the "Prompt"
    # variable. Use a pass-through template and keep the raw user text.
    template = PromptTemplate(template="{Prompt}", input_variables=["Prompt"])

    llm = Clarifai(
        pat=config.CLARIFAI_PAT,
        user_id=config.USER_ID,
        app_id=config.APP_ID,
        model_id=config.MODEL_ID,
        model_version_id=config.MODEL_VERSION_ID,
    )

    llmchain = LLMChain(
        prompt=template,
        llm=llm,
    )

    llm_response = llmchain.run({"Prompt": prompt})
    return Generate(text=llm_response)
36
+
37
+
38
+
39
# Allow cross-origin requests from any site (public demo API).
# NOTE(review): allow_credentials=True combined with allow_origins=["*"] is
# rejected/ignored by browsers per the CORS spec — confirm intent.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/", tags=["Home"])
def api_home():
    """Landing/health endpoint."""
    return {'detail': 'Welcome to FastAPI TextGen Tutorial!'}

@app.post("/api/generate", summary="Generate text from prompt", tags=["Generate"], response_model=Generate)
def inference(input_prompt: str):
    """Generate text for ``input_prompt`` (passed as a query parameter)."""
    return generate_text(prompt=input_prompt)
app.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from TextGen import app
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ fastapi==0.99.1
2
+ uvicorn
3
+ requests
4
+ pydantic==1.10.12
5
+ langchain
6
+ clarifai
7
+ Pillow