devin-ai committed
Commit 575f0f7 · verified · 1 Parent(s): 2169742

Upload 7 files

Files changed (7)
  1. .env +1 -0
  2. .gitattributes +35 -35
  3. Dockerfile +11 -0
  4. README.md +10 -10
  5. main.py +89 -0
  6. requirements.txt +9 -0
  7. salaries.csv +0 -0
.env ADDED
@@ -0,0 +1 @@
+ GOOGLE_API_KEY=
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.9
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ COPY . .
+
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
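For local development without Docker, the same server can be started programmatically. A minimal sketch that mirrors the CMD above; the helper filename is an assumption and is not part of this commit (it assumes main.py and a populated .env sit in the working directory):

# run_local.py - hypothetical helper, not included in this upload
# Equivalent to: uvicorn main:app --host 0.0.0.0 --port 7860
import uvicorn

if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=7860)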
README.md CHANGED
@@ -1,10 +1,10 @@
- ---
- title: Table
- emoji: 🌍
- colorFrom: purple
- colorTo: gray
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Backend
+ emoji: 📊
+ colorFrom: blue
+ colorTo: gray
+ sdk: docker
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
main.py ADDED
@@ -0,0 +1,89 @@
+ from fastapi import FastAPI, HTTPException
+ from fastapi.middleware.cors import CORSMiddleware
+ import google.generativeai as genai
+ import os
+ import requests
+ import pandas as pd
+ import validators
+ from sklearn.feature_extraction.text import TfidfVectorizer
+
+ from dotenv import load_dotenv
+ load_dotenv()  # read GOOGLE_API_KEY from .env
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+ df = pd.read_csv("salaries.csv")
+
+ app = FastAPI()
+
+ origins = [
+     "http://localhost:5173",  # local frontend dev server
+
+ ]
+
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=origins,
+     allow_credentials=True,
+     allow_methods=['*'],
+     allow_headers=['*'],
+ )
+
+ model = genai.GenerativeModel("gemini-1.5-flash")
+
+
+ df_combined = df.astype(str).apply(lambda x: ' '.join(x), axis=1).tolist()
+
+ vectorizer = TfidfVectorizer()
+ X = vectorizer.fit_transform(df_combined)
+
+ def generate_gemini_content(prompt, content):
+     response = model.generate_content([prompt, content])
+     return response.text
+
+
+ work_year = df['work_year']
+ job_titles = df['job_title']
+ salaries = df['salary']
+ experience_level = df['experience_level']
+ employment_type = df['employment_type']
+ salary_in_usd = df['salary_in_usd']
+ company_size = df['company_size']
+
+ # Combine all columns as strings for vectorization (if needed)
+ df_combined = df.astype(str).apply(lambda x: ' '.join(x), axis=1).tolist()
+
+ # Use TF-IDF Vectorizer for embeddings
+ vectorizer = TfidfVectorizer()
+ X = vectorizer.fit_transform(df_combined)
+
+ prompt = f"""
+
+ I have a dataset with the following salary information:
+
+ Work Year: {work_year.tolist()}
+ Job Titles: {job_titles.tolist()}
+ Salaries (in USD): {salary_in_usd.tolist()}
+ Experience Level: {experience_level.tolist()}
+ Employment Type: {employment_type.tolist()}
+ Company Size: {company_size.tolist()}
+
+ Based on this data, can you answer the following question:
+
+
+ Please provide a short and direct answer based on the data. No extra explanation is needed: just the answer, in fewer than 2 lines, without newlines or special characters beyond those related to the data.
+ """
+
+ @app.get("/hi")  # simple liveness-check route
+ async def ee():
+     return {"de": "hehe"}
+
+ @app.post("/webqa")
+ async def webchat(question: str):
+     try:
+         response = model.generate_content([prompt, question])
+         print(response.text)
+         return response.text
+
+     except Exception:
+         raise HTTPException(status_code=500, detail="Internal Server Error")
+
+
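Once the container is running, /webqa reads the question from the query string (FastAPI treats a bare `question: str` parameter on a POST route as a query parameter). A minimal client sketch; the host, port, and sample question are assumptions, not part of this commit:

# webqa_client.py - hypothetical helper, not included in this upload
import requests

resp = requests.post(
    "http://localhost:7860/webqa",  # port published by the Dockerfile CMD; adjust for a deployed Space
    params={"question": "Which job title has the highest average salary?"},
)
resp.raise_for_status()
print(resp.json())  # the endpoint returns the model's answer as a JSON-encoded string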
requirements.txt ADDED
@@ -0,0 +1,9 @@
+
+ google-generativeai
+ python-dotenv
+ fastapi
+ uvicorn
+ requests
+ validators
+ scikit-learn
+ pandas
salaries.csv ADDED
The diff for this file is too large to render. See raw diff