SurajSingh commited on
Commit
24b4b92
·
1 Parent(s): 33b8880

Deployment file added

Browse files
.gitattributes CHANGED
@@ -6,6 +6,7 @@
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
 
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
 
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.t7 filter=lfs diff=lfs merge=lfs -text
10
  *.joblib filter=lfs diff=lfs merge=lfs -text
11
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9

# System packages first: this layer changes rarely (caches well) and puts
# build tools (g++, libhdf5-dev) in place before any pip source builds.
# Previously this ran AFTER pip install, so pip could not use them.
RUN apt-get update && apt-get install -y --no-install-recommends \
    bzip2 \
    g++ \
    git \
    graphviz \
    libgl1-mesa-glx \
    libhdf5-dev \
    openmpi-bin \
    wget \
    python3-tk && \
    rm -rf /var/lib/apt/lists/*

# Rust toolchain: some wheels (e.g. tokenizers) compile Rust from source.
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Dependencies in their own layer so editing app code does not invalidate
# the (slow) dependency install.
COPY requirements.txt requirements.txt
RUN pip install --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Run as an unprivileged user; uid 1000 matches the HF Spaces convention
# (TODO confirm the platform requirement).
RUN useradd -m -u 1000 myuser
USER myuser

COPY --chown=myuser app app

EXPOSE 8001

CMD ["python", "app/main.py"]
README.md CHANGED
@@ -1,11 +1,12 @@
1
  ---
2
- title: Gpt2 Email Generation
3
- emoji: 🏆
4
- colorFrom: blue
5
- colorTo: yellow
6
  sdk: docker
7
  pinned: false
8
  license: mit
 
9
  ---
10
 
11
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Ai Chatbot Email Subject Generator
3
+ emoji: 👁
4
+ colorFrom: green
5
+ colorTo: purple
6
  sdk: docker
7
  pinned: false
8
  license: mit
9
+ app_port: 8001
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __version__ = "0.0.1"
app/config.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys  # NOTE(review): unused in this module -- safe to drop if nothing imports it from here
from typing import List

from pydantic import AnyHttpUrl, BaseSettings

class Settings(BaseSettings):
    """Application settings (pydantic v1 BaseSettings: fields can be
    overridden via environment variables of the same, case-sensitive name)."""

    # Prefix under which versioned API routes (e.g. the OpenAPI JSON) live.
    API_V1_STR: str = "/api/v1"

    # Meta

    # BACKEND_CORS_ORIGINS is a comma-separated list of origins
    # e.g: http://localhost,http://localhost:4200,http://localhost:3000
    BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = [
        "http://localhost:3000",  # type: ignore
        "http://localhost:8000",  # type: ignore
        "https://localhost:3000",  # type: ignore
        "https://localhost:8000",  # type: ignore
    ]

    # Human-readable title shown in the FastAPI/OpenAPI docs.
    PROJECT_NAME: str = "Recognition API"

    class Config:
        # Environment variable names must match field names exactly.
        case_sensitive = True

# Single shared settings instance imported by the rest of the app.
settings = Settings()
app/main.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
from pathlib import Path
import os
# Put the repository root on sys.path so the absolute `app.*` imports below
# resolve when this file is executed directly (as the Docker CMD does).
sys.path.append(str(Path(__file__).resolve().parent.parent))
#print(sys.path)
from typing import Any
from fastapi import FastAPI, Request, APIRouter, Form
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.middleware.cors import CORSMiddleware
from app.config import settings
from app import __version__
from app.pyfiles import subject_gen  # NOTE: loads the GPT-2 model/tokenizer at import time
import numpy as np
import pandas as pd
# from transformers import GPT2Tokenizer ,GPT2Model

# Absolute directory of this file (app/); available for locating resources.
current_path = os.path.dirname(os.path.abspath(__file__))

# model = GPT2Model.from_pretrained("gpt2")
# tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# checkpoint1 = os.path.join(current_path, "pyfiles")
# checkpoint = os.path.join(checkpoint1, "gpt_tokenizer")
# model.save_pretrained(checkpoint)
# tokenizer.save_pretrained(checkpoint)

app = FastAPI(
    title=settings.PROJECT_NAME, openapi_url=f"{settings.API_V1_STR}/openapi.json"
)
# app.mount("/static", StaticFiles(directory="app/static"), name="static")


# Jinja2 templates rendered by the HTML endpoints below.
# Directory is relative to the process working directory (repo root in Docker).
templates = Jinja2Templates(directory="app/templates")

# Module-level scratch slot written by /subject_generation/.
# NOTE(review): the code that writes/reads it looks broken -- verify it is needed.
email_inp = None
#################################### Home Page endpoints #################################################
@app.get("/")
async def root(request: Request):
    """Render the landing page (task-selection screen)."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)
42
+
43
+
44
+
45
@app.get("/email_input/")
async def email_input_root(request: Request):
    """Render the form where the user pastes an email body."""
    print("hi im in email input")  # debug trace
    context = {"request": request}
    return templates.TemplateResponse("email_input.html", context)
49
+
50
+
51
+
52
@app.post("/subject_generation/")
async def create_upload_files(request: Request, paragraphInput: str = Form(...)):
    """Generate a subject line for the email text posted from the form.

    ``paragraphInput`` is the raw email body from the <textarea> field in
    email_input.html.

    Bug fixed: the previous implementation treated the ``Form(...)`` string
    as an UploadFile -- accessing ``.content_type``, ``await .read()`` and
    ``.filename`` -- which raised AttributeError on every request; it also
    round-tripped the text through a file under app/static/ via the
    module-level ``email_inp`` global (``None`` on first use). The form
    value is now used directly.
    """
    email = paragraphInput
    # Model inference; returns the generated subject string.
    result = subject_gen.subject_gen_func(email)
    return templates.TemplateResponse(
        "subject_generation.html",
        {"request": request, "result": result, "email": email},
    )
77
+
78
+
79
# Set all CORS enabled origins
# (registered after route definitions; FastAPI applies middleware app-wide
# regardless of registration order relative to routes)
if settings.BACKEND_CORS_ORIGINS:
    app.add_middleware(
        CORSMiddleware,
        # AnyHttpUrl objects must be stringified for the middleware.
        allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
88
+
89
+
90
# Start app
# Entry point when run directly (the Docker CMD); binds to all interfaces
# on port 8001, matching EXPOSE in the Dockerfile and app_port in README.md.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8001)
app/pyfiles/gpt2_3epoch/config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "gpt2",
3
+ "activation_function": "gelu_new",
4
+ "architectures": [
5
+ "GPT2LMHeadModel"
6
+ ],
7
+ "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
+ "embd_pdrop": 0.1,
10
+ "eos_token_id": 50256,
11
+ "initializer_range": 0.02,
12
+ "layer_norm_epsilon": 1e-05,
13
+ "model_type": "gpt2",
14
+ "n_ctx": 1024,
15
+ "n_embd": 768,
16
+ "n_head": 12,
17
+ "n_inner": null,
18
+ "n_layer": 12,
19
+ "n_positions": 1024,
20
+ "reorder_and_upcast_attn": false,
21
+ "resid_pdrop": 0.1,
22
+ "scale_attn_by_inverse_layer_idx": false,
23
+ "scale_attn_weights": true,
24
+ "summary_activation": null,
25
+ "summary_first_dropout": 0.1,
26
+ "summary_proj_to_labels": true,
27
+ "summary_type": "cls_index",
28
+ "summary_use_proj": true,
29
+ "task_specific_params": {
30
+ "text-generation": {
31
+ "do_sample": true,
32
+ "max_length": 50
33
+ }
34
+ },
35
+ "torch_dtype": "float32",
36
+ "transformers_version": "4.36.0",
37
+ "use_cache": true,
38
+ "vocab_size": 50260
39
+ }
app/pyfiles/gpt2_3epoch/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 50256,
4
+ "eos_token_id": 50256,
5
+ "transformers_version": "4.36.0"
6
+ }
app/pyfiles/gpt2_3epoch/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a05bd44466157c4b1e49430a2041682efd02563c1ad3db75aaf655b63cf35c9e
3
+ size 497783424
app/pyfiles/gpt2_3epoch/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32212753abe04ec447858f468942187b9608277b6254812afb5c6abb3a119fd6
3
+ size 995660293
app/pyfiles/gpt2_3epoch/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d42ae0a45828f61b6a23c6010b52b2e9e8c0c4f0e8e5804c63f017ff1c8bd68
3
+ size 14575
app/pyfiles/gpt2_3epoch/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed6905d5273c406da5dfd33b20f749017129ee92a89b048639acfac93faeb025
3
+ size 627
app/pyfiles/gpt2_3epoch/trainer_state.json ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.3250207813798838,
5
+ "eval_steps": 1000,
6
+ "global_step": 6000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.28,
13
+ "learning_rate": 4.65839800443459e-05,
14
+ "loss": 5.6144,
15
+ "step": 500
16
+ },
17
+ {
18
+ "epoch": 0.55,
19
+ "learning_rate": 4.311945676274945e-05,
20
+ "loss": 3.3585,
21
+ "step": 1000
22
+ },
23
+ {
24
+ "epoch": 0.55,
25
+ "eval_loss": 3.1234283447265625,
26
+ "eval_runtime": 415.1946,
27
+ "eval_samples_per_second": 34.769,
28
+ "eval_steps_per_second": 4.347,
29
+ "step": 1000
30
+ },
31
+ {
32
+ "epoch": 0.83,
33
+ "learning_rate": 3.9654933481152996e-05,
34
+ "loss": 3.2619,
35
+ "step": 1500
36
+ },
37
+ {
38
+ "epoch": 1.11,
39
+ "learning_rate": 3.619041019955654e-05,
40
+ "loss": 3.1664,
41
+ "step": 2000
42
+ },
43
+ {
44
+ "epoch": 1.11,
45
+ "eval_loss": 2.9533257484436035,
46
+ "eval_runtime": 414.0939,
47
+ "eval_samples_per_second": 34.862,
48
+ "eval_steps_per_second": 4.359,
49
+ "step": 2000
50
+ },
51
+ {
52
+ "epoch": 1.39,
53
+ "learning_rate": 3.272588691796009e-05,
54
+ "loss": 3.0865,
55
+ "step": 2500
56
+ },
57
+ {
58
+ "epoch": 1.66,
59
+ "learning_rate": 2.9261363636363635e-05,
60
+ "loss": 3.0651,
61
+ "step": 3000
62
+ },
63
+ {
64
+ "epoch": 1.66,
65
+ "eval_loss": 2.849994659423828,
66
+ "eval_runtime": 413.8155,
67
+ "eval_samples_per_second": 34.885,
68
+ "eval_steps_per_second": 4.362,
69
+ "step": 3000
70
+ },
71
+ {
72
+ "epoch": 1.94,
73
+ "learning_rate": 2.5796840354767186e-05,
74
+ "loss": 3.0161,
75
+ "step": 3500
76
+ },
77
+ {
78
+ "epoch": 2.22,
79
+ "learning_rate": 2.2332317073170734e-05,
80
+ "loss": 2.9496,
81
+ "step": 4000
82
+ },
83
+ {
84
+ "epoch": 2.22,
85
+ "eval_loss": 2.7711386680603027,
86
+ "eval_runtime": 414.5048,
87
+ "eval_samples_per_second": 34.827,
88
+ "eval_steps_per_second": 4.355,
89
+ "step": 4000
90
+ },
91
+ {
92
+ "epoch": 2.49,
93
+ "learning_rate": 1.886779379157428e-05,
94
+ "loss": 2.9008,
95
+ "step": 4500
96
+ },
97
+ {
98
+ "epoch": 2.77,
99
+ "learning_rate": 1.5403270509977826e-05,
100
+ "loss": 2.9216,
101
+ "step": 5000
102
+ },
103
+ {
104
+ "epoch": 2.77,
105
+ "eval_loss": 2.719022750854492,
106
+ "eval_runtime": 413.676,
107
+ "eval_samples_per_second": 34.897,
108
+ "eval_steps_per_second": 4.363,
109
+ "step": 5000
110
+ },
111
+ {
112
+ "epoch": 3.05,
113
+ "learning_rate": 1.1938747228381375e-05,
114
+ "loss": 2.8984,
115
+ "step": 5500
116
+ },
117
+ {
118
+ "epoch": 3.33,
119
+ "learning_rate": 8.474223946784923e-06,
120
+ "loss": 2.8326,
121
+ "step": 6000
122
+ },
123
+ {
124
+ "epoch": 3.33,
125
+ "eval_loss": 2.6863553524017334,
126
+ "eval_runtime": 413.8185,
127
+ "eval_samples_per_second": 34.885,
128
+ "eval_steps_per_second": 4.362,
129
+ "step": 6000
130
+ }
131
+ ],
132
+ "logging_steps": 500,
133
+ "max_steps": 7216,
134
+ "num_input_tokens_seen": 0,
135
+ "num_train_epochs": 4,
136
+ "save_steps": 500,
137
+ "total_flos": 8000641580544000.0,
138
+ "train_batch_size": 4,
139
+ "trial_name": null,
140
+ "trial_params": null
141
+ }
app/pyfiles/gpt2_3epoch/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a328fb2002f110a8fb6b6bc9adb7284bf489e232bcc66216e1d9cf07892857eb
3
+ size 4283
app/pyfiles/gpt_tokenizer/added_tokens.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "<email>": 50257,
3
+ "<subject>": 50258,
4
+ "[PAD]": 50259
5
+ }
app/pyfiles/gpt_tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
app/pyfiles/gpt_tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|endoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<|endoftext|>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
app/pyfiles/gpt_tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "50256": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "50257": {
14
+ "content": "<email>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": false
20
+ },
21
+ "50258": {
22
+ "content": "<subject>",
23
+ "lstrip": false,
24
+ "normalized": true,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": false
28
+ },
29
+ "50259": {
30
+ "content": "[PAD]",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ }
37
+ },
38
+ "bos_token": "<|endoftext|>",
39
+ "clean_up_tokenization_spaces": true,
40
+ "eos_token": "<|endoftext|>",
41
+ "errors": "replace",
42
+ "model_max_length": 1024,
43
+ "pad_token": "[PAD]",
44
+ "tokenizer_class": "GPT2Tokenizer",
45
+ "unk_token": "<|endoftext|>"
46
+ }
app/pyfiles/gpt_tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
app/pyfiles/subject_gen.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from pathlib import Path
import numpy as np
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer ,GPT2Model

# Paths to the bundled fine-tuned artifacts, resolved relative to this file.
current_path = os.path.dirname(os.path.abspath(__file__))
tokenizer_path = os.path.join(current_path, "gpt_tokenizer")
model_path = os.path.join(current_path, "gpt2_3epoch")
# Load the saved tokenizer and fine-tuned GPT-2 LM once at import time (CPU).
tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_path) # also try gpt2-medium
model = GPT2LMHeadModel.from_pretrained(model_path)
# Re-register the email/subject markup tokens and the [PAD] token.
# NOTE(review): the saved tokenizer appears to contain these already
# (added_tokens.json maps them to 50257-50259), so these calls are likely
# no-ops -- confirm; the resize is harmless either way.
extra_tokens = ["<email>", "<subject>"]
tokenizer.add_tokens(extra_tokens)
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
model.resize_token_embeddings(len(tokenizer))
17
def subject_gen_func(email):
    """Generate a subject line for *email* with the fine-tuned GPT-2 model.

    Returns the text between the first "<subject>" marker and the next one
    (if any) in the generated sequence, with "<eos>" markers stripped.
    Returns "" when no "<subject>" marker is emitted, instead of raising
    IndexError as the original unguarded ``split(...)[1]`` did.

    NOTE(review): the prompt is the raw email text; the training format
    presumably wrapped it in "<email>"/"<subject>" markers -- confirm
    whether the prompt should be prefixed accordingly.
    """
    device = "cpu"
    prompt = email
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
    attention_mask = torch.ones_like(input_ids)
    pad_token_id = tokenizer.eos_token_id
    # Greedy generation (no sampling args); max_length caps prompt + output
    # at the model's 1024-token context size.
    output_ids = model.generate(
        input_ids,
        max_length=1024,
        num_return_sequences=1,
        attention_mask=attention_mask,
        pad_token_id=pad_token_id,
    )
    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    parts = generated_text.split("<subject>")
    if len(parts) < 2:
        # Model produced no subject marker; degrade gracefully.
        return ""
    return parts[1].replace("<eos>", "")
app/templates/email_input.html ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
<head>
    <title>Index</title>
</head>
<body>
    <div>
        <h1 style="background-color:LightGray;">
            <center>Email Subject Line Generator </center>
        </h1>
    </div>
    <div>
        <fieldset>
            <ul>
                <!-- Fixed: the original used invalid "<!li>" / "<!/li>" pseudo-tags
                     (parsed by browsers as bogus comments) instead of <li> / </li>. -->
                <li>
                    <br>
                    <form action="/subject_generation/" enctype="multipart/form-data" method="post">
                        <span style="font-weight:bold;font-family:sans-serif">Enter Email:</span> <br><br>
                        <textarea name="paragraphInput" rows="5" cols="100"></textarea>
                        <br><br><br><br>
                        <button type="submit">Generate Subject</button>
                    </form>
                </li>
                <br><br>
                <form action="/" method="get">
                    <button type="submit">Home</button>
                </form>
            </ul>
        </fieldset>
    </div>
</body>
</html>
app/templates/index.html ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
<head>
    <title>Index</title>
    <style>
        /* Add a style block for custom CSS */
        .button-container {
            text-align: center; /* Center the buttons */
        }

        .button-container form {
            display: inline-block; /* Display forms (buttons) in a line */
            margin: 0 10px; /* Add margin between buttons for spacing */
        }
    </style>
</head>
<body>
    <div>
        <h1 style="background-color:LightGray;">
            <center>AI Chat Bot</center>
        </h1>
    </div>
    <div>
        <fieldset>
            <ul>
                <div>
                    <h2 style="background-color:white;">
                        <center>Select Task</center>
                    </h2>
                </div>
                <br><br><br>
                <div class="button-container"> <!-- Wrap buttons in a container -->
                    <!-- Jinja2: url_for resolves the route name from app/main.py -->
                    <form action="{{ url_for('email_input_root') }}"><button>Email Subject Generator</button></form>
                    <!-- NOTE(review): no action attribute -- this button just reloads
                         the current page; presumably a placeholder for a future route. -->
                    <form ><button>AI ML Question Answering Chatbot</button></form>

                </div>
                <br>
            </ul>
        </fieldset>
    </div>
</body>
</html>
app/templates/subject_generation.html ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
<head>
    <title>Predict</title>
</head>
<body>
    <div>
        <h1 style="background-color:LightGray;">
            <center>Subject Generation </center>
        </h1>
    </div>
    <div>
        <fieldset>
            <!-- Fixed: removed a stray, never-opened "</Input>" closing tag
                 that followed the "Email:" span. -->
            <h3><center><span style="font-weight:bold;font-family:sans-serif">Email:</span></center></h3>
            <p>
                <center>
                    <span style="font-weight:bold;color:blue"> {{email}}</span>
                </center>
            </p>
            <br><br>
            <h3>
                <center>
                    <span style="font-weight:bold;font-family:sans-serif">Subject:</span>
                    <span style="font-weight:bold;color:blue"> {{result}}</span>
                </center>
            </h3>
            <br><br>
            <form action="/email_input/" method="get">
                <center><button type="submit">Check Another Input</button></center>
            </form>
            <br>
            <form action="/" method="get">
                <center><button type="submit">Home</button></center>
            </form>
        </fieldset>
    </div>
</body>
</html>
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
uvicorn==0.17.6
fastapi==0.99.1
pydantic==1.10.10
requests==2.23.0
jinja2==3.1.2
python-multipart
numpy
pandas
setuptools-rust
# transformers[torch] installs transformers itself, so the bare
# "transformers" entry that used to precede it was a duplicate.
transformers[torch]
accelerate