myn0908 committed on
Commit
cc91688
·
1 Parent(s): dc97f58

Language Translator

.gitattributes DELETED
@@ -1,35 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
+ .idea
+ .DS_Store
+ __pycache__
LanguageTranslator/model/openai_model.py ADDED
@@ -0,0 +1,55 @@
+ import openai
+ import time
+ import math
+ 
+ class Model:
+     def make_text_prompt(self, text: str, target_language: str) -> str:
+         # Create a text prompt for translation
+         return f"Translate to {target_language}: {text}"
+ 
+     def translate_prompt(self, content, target_language: str) -> str:
+         # Generate a translation prompt based on the content type
+         if isinstance(content, (str, list)):
+             return self.make_text_prompt(content, target_language)
+ 
+     def make_request(self, prompt):
+         # Subclasses must implement this method to send the request
+         raise NotImplementedError("Subclasses must implement the make_request method")
+ 
+ 
+ class OpenAIModel(Model):
+     def __init__(self, model_name: str, api_key: str):
+         self.model_name = model_name
+         openai.api_key = api_key
+ 
+     def make_translation_request(self, prompt):
+         attempts = 0
+         while attempts < 3:
+             try:
+                 if self.model_name == "gpt-3.5-turbo":
+                     response = openai.ChatCompletion.create(
+                         model=self.model_name,
+                         messages=[
+                             {"role": "user", "content": prompt}
+                         ]
+                     )
+                     translation = response.choices[0].message['content'].strip()
+                 else:
+                     response = openai.Completion.create(
+                         model=self.model_name,
+                         prompt=prompt,
+                         max_tokens=150,
+                         temperature=0
+                     )
+                     translation = response.choices[0].text.strip()
+ 
+                 return translation, True
+             except openai.error.RateLimitError:
+                 attempts += 1
+                 if attempts < 3:
+                     wait_time = math.pow(2, attempts)  # exponential backoff
+                     print(f"Rate limit reached. Waiting {wait_time:.0f} seconds before retrying.")
+                     time.sleep(wait_time)
+                 else:
+                     raise Exception("Rate limit reached. Maximum attempts exceeded.")
+         return "", False
LanguageTranslator/test.json ADDED
@@ -0,0 +1,4 @@
+ {
+     "text": "I am iron man, I love you",
+     "dest_language": "vi"
+ }
LanguageTranslator/utils/argument.py ADDED
@@ -0,0 +1,20 @@
+ import argparse
+ 
+ 
+ class ArgumentParser:
+     def __init__(self):
+         self.parser = argparse.ArgumentParser(description='Translate English to Vietnamese.')
+         self.parser.add_argument('--model_type', type=str, required=False, default='OpenAIModel',
+                                  help='The type of translation model to use. Choose between "GLMModel" and "OpenAIModel".')
+         self.parser.add_argument('--text', nargs='+', type=str, help='Input text(s) for translation.')
+         self.parser.add_argument('--dest_language', type=str, help='Target language for translation.')
+         self.parser.add_argument('--timeout', type=int, help='Timeout for the API request in seconds.')
+         self.parser.add_argument('--openai_model', type=str, required=False, default='gpt-3.5-turbo',
+                                  help='The name of the OpenAI model. Required if model_type is "OpenAIModel".')
+         self.parser.add_argument('--openai_api_key', type=str, required=False, default='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',
+                                  help='The API key for OpenAIModel. Required if model_type is "OpenAIModel".')
+         self.parser.add_argument('--json', type=str, help='Path to a JSON file for input')
+ 
+     def parse_arguments(self):
+         args = self.parser.parse_args()
+         return args
README.md CHANGED
@@ -1,12 +1,36 @@
- ---
- title: Language Translation
- emoji: 🌖
- colorFrom: pink
- colorTo: indigo
- sdk: gradio
- sdk_version: 4.2.0
- app_file: app.py
- pinned: false
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Language Translator
+ ## Introduction
+ OpenAI Translator is a tool for translating English into Vietnamese using a Large Language Model (LLM) such as GPT-3.5-turbo.
+ 
+ The project is built in Python and includes an API powered by FastAPI and a user interface built with Gradio.
+ 
+ ## Getting Started
+ ### Environment Setup
+ 1. Clone the repository:
+ ```commandline
+ git clone git@github.com:TinVo0908/Language-Translator.git
+ ```
+ 2. The project requires Python 3.9+.
+ 3. Install the dependencies:
+ ```
+ pip install -r requirements.txt
+ ```
+ ### How to Use
+ 1. Running from the command line in a terminal:
+ For text input with a destination language:
+ ```commandline
+ python test_local_translator.py --text 'your input text' --dest_language 'vi'
+ ```
+ For a JSON file as input:
+ ```commandline
+ python test_local_translator.py --json 'your json file'
+ ```
+ Note: You can set your API key with --openai_api_key and the model name with --openai_model, or fall back to the defaults, as shown below.
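+ For example, a hypothetical invocation that overrides both (the key shown is a placeholder, not a real credential):
+ ```commandline
+ python test_local_translator.py --text 'your input text' --dest_language 'vi' --openai_model 'gpt-3.5-turbo' --openai_api_key 'YOUR_OPENAI_API_KEY'
+ ```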
+ 2. Running the API (see the example requests below):
+ ```commandline
+ uvicorn api:app --reload
+ ```
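+ Once the server is up, the two endpoints can be exercised with, for example (illustrative requests; assumes the default uvicorn host and port, http://127.0.0.1:8000):
+ ```commandline
+ curl -X POST "http://127.0.0.1:8000/translate?text=I%20love%20you&dest_language=vi"
+ curl -X POST "http://127.0.0.1:8000/translate_json" -F "json_file=@LanguageTranslator/test.json"
+ ```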
+ 3. Running the user interface:
+ ```commandline
+ python app.py
+ ```
api.py ADDED
@@ -0,0 +1,62 @@
+ from fastapi import FastAPI, UploadFile, Query, HTTPException
+ from starlette.responses import RedirectResponse
+ from typing import Union, List
+ from pydantic import BaseModel
+ from translator import ServerTranslator
+ import json
+ import uvicorn
+ 
+ app = FastAPI()
+ 
+ 
+ # Define a data model for the input
+ class TranslationInput(BaseModel):
+     text: str
+     dest_language: str
+ 
+ 
+ class TranslationResult(BaseModel):
+     text: Union[str, List[str]]
+     language_translation: str
+ 
+ 
+ @app.get("/", include_in_schema=False)
+ async def index():
+     return RedirectResponse(url="/docs")
+ 
+ 
+ @app.post("/translate", response_model=TranslationResult)
+ async def run_translation_manual(
+         text: str = Query(..., description="Input text to translate"),
+         dest_language: str = Query(..., description="Destination language")):
+     # Splitting the input text
+     text = text.split(',')
+     # Creating and processing the translator
+     processing_language = ServerTranslator.language_translator(
+         text=text,
+         dest_language=dest_language,
+     )
+     # Getting the translated result
+     result_response = processing_language.translate()
+     return result_response
+ 
+ 
+ @app.post("/translate_json", response_model=TranslationResult)
+ async def run_translation_auto(json_file: UploadFile):
+     try:
+         # Reading the JSON content from the file
+         json_content = await json_file.read()
+         json_data = json.loads(json_content.decode("utf-8"))
+         # Creating and processing the translator
+         processing_language = ServerTranslator.language_translator(
+             json_data
+         )
+         # Getting the translated result
+         result_response = processing_language.translate()
+         return result_response
+     except json.JSONDecodeError:
+         raise HTTPException(status_code=400, detail="Invalid JSON input")
+ 
+ 
+ if __name__ == "__main__":
+     uvicorn.run(app, host="0.0.0.0", port=8000)
app.py ADDED
@@ -0,0 +1,62 @@
+ import gradio as gr
+ from translator import ServerTranslator
+ import json
+ 
+ 
+ def clear_all(input_json, input_text, input_dest_lang, translated_text):
+     return None, "", "", ""
+ 
+ 
+ def translate_text_json(input_json):
+     # Translation code for JSON input
+     try:
+         json_file_path = input_json.name
+         with open(json_file_path, 'r') as f:
+             file_content = f.read()
+             json_input = json.loads(file_content)
+         translation = ServerTranslator.language_translator(inputs_data=json_input).translate()
+         translate_text = translation['text']
+         return translate_text
+     except Exception as e:
+         return f"Error: {str(e)}"
+ 
+ 
+ def translate_text_text(input_text, input_dest_lang):
+     # Translation code for text input
+     try:
+         translation = ServerTranslator.language_translator(text=input_text, dest_language=input_dest_lang).translate()
+         translate_text = translation['text']
+         return translate_text
+     except Exception as e:
+         return f"Error: {str(e)}"
+ 
+ 
+ with gr.Blocks() as demo:
+     input_json = gr.components.File(label="Upload JSON file")
+     input_dest_lang = gr.components.Textbox(placeholder='Example input: vi', label="Destination Language")
+     input_text = gr.components.Textbox(placeholder='Example inputs: I love you, I love you than myself', label="Enter Text")
+     translated_text = gr.components.Textbox(placeholder='Example outputs: Anh yêu em, Anh yêu em hơn bản thân mình',
+                                              label="Translated Text")
+ 
+     with gr.Column():
+         submit_json = gr.Button("Submit Json")
+         submit_text = gr.Button("Submit Text")
+         submit_json.click(
+             translate_text_json,
+             [input_json],  # JSON file is the only input
+             [translated_text]
+         )
+     with gr.Column():
+         submit_text.click(
+             translate_text_text,
+             [input_text, input_dest_lang],  # text and destination language inputs
+             [translated_text]
+         )
+     clear = gr.Button("Clear")
+     clear.click(
+         clear_all,
+         [input_json, input_text, input_dest_lang, translated_text],
+         [input_json, input_text, input_dest_lang, translated_text]
+     )
+ 
+ demo.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,143 @@
+ aiofiles==23.2.1
+ aiohttp==3.8.6
+ aiosignal==1.3.1
+ altair==5.1.2
+ annotated-types==0.6.0
+ anyio==3.7.1
+ async-timeout==4.0.3
+ attrs==23.1.0
+ backoff==2.2.1
+ bcrypt==4.0.1
+ beautifulsoup4==4.12.2
+ cachetools==5.3.2
+ certifi==2023.7.22
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ chroma-hnswlib==0.7.3
+ chromadb==0.4.16
+ click==8.1.7
+ colorama==0.4.6
+ coloredlogs==15.0.1
+ contourpy==1.2.0
+ cycler==0.12.1
+ dataclasses-json==0.6.2
+ Deprecated==1.2.14
+ emoji==2.8.0
+ exceptiongroup==1.1.3
+ faiss-cpu==1.7.4
+ fastapi==0.104.1
+ ffmpy==0.3.1
+ filelock==3.13.1
+ filetype==1.2.0
+ flatbuffers==23.5.26
+ fonttools==4.44.0
+ fpdf==1.7.2
+ frozenlist==1.4.0
+ fsspec==2023.10.0
+ google-auth==2.23.4
+ googleapis-common-protos==1.61.0
+ gradio==3.45.2
+ gradio_client==0.5.3
+ grpcio==1.59.2
+ h11==0.14.0
+ httpcore==1.0.2
+ httptools==0.6.1
+ httpx==0.25.1
+ huggingface-hub==0.17.3
+ humanfriendly==10.0
+ idna==3.4
+ importlib-metadata==6.8.0
+ importlib-resources==6.1.1
+ install==1.3.5
+ Jinja2==3.1.2
+ joblib==1.3.2
+ jsonpatch==1.33
+ jsonpointer==2.4
+ jsonschema==4.19.2
+ jsonschema-specifications==2023.7.1
+ kiwisolver==1.4.5
+ kubernetes==28.1.0
+ langchain==0.0.334
+ langdetect==1.0.9
+ langsmith==0.0.63
+ lxml==4.9.3
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.3
+ marshmallow==3.20.1
+ matplotlib==3.8.1
+ mdurl==0.1.2
+ monotonic==1.6
+ mpmath==1.3.0
+ multidict==6.0.4
+ mypy-extensions==1.0.0
+ nltk==3.8.1
+ numpy==1.26.1
+ oauthlib==3.2.2
+ onnxruntime==1.16.2
+ openai==0.27.3
+ opentelemetry-api==1.21.0
+ opentelemetry-exporter-otlp-proto-common==1.21.0
+ opentelemetry-exporter-otlp-proto-grpc==1.21.0
+ opentelemetry-proto==1.21.0
+ opentelemetry-sdk==1.21.0
+ opentelemetry-semantic-conventions==0.42b0
+ orjson==3.9.10
+ overrides==7.4.0
+ packaging==23.2
+ pandas==2.1.2
+ Pillow==10.1.0
+ posthog==3.0.2
+ protobuf==4.25.0
+ pulsar-client==3.3.0
+ pyasn1==0.5.0
+ pyasn1-modules==0.3.0
+ pydantic==2.4.2
+ pydantic_core==2.10.1
+ pydub==0.25.1
+ Pygments==2.16.1
+ pyparsing==3.1.1
+ PyPika==0.48.9
+ python-dateutil==2.8.2
+ python-dotenv==1.0.0
+ python-iso639==2023.6.15
+ python-magic==0.4.27
+ python-multipart==0.0.6
+ pytz==2023.3.post1
+ PyYAML==6.0.1
+ rapidfuzz==3.5.2
+ referencing==0.30.2
+ regex==2023.10.3
+ requests==2.31.0
+ requests-oauthlib==1.3.1
+ rich==13.6.0
+ rpds-py==0.12.0
+ rsa==4.9
+ semantic-version==2.10.0
+ shellingham==1.5.4
+ six==1.16.0
+ sniffio==1.3.0
+ soupsieve==2.5
+ SQLAlchemy==2.0.23
+ starlette==0.27.0
+ sympy==1.12
+ tabulate==0.9.0
+ tenacity==8.2.3
+ tiktoken==0.5.1
+ tokenizers==0.14.1
+ tomlkit==0.12.0
+ toolz==0.12.0
+ tqdm==4.66.1
+ typer==0.9.0
+ typing-inspect==0.9.0
+ typing_extensions==4.8.0
+ tzdata==2023.3
+ unstructured==0.10.29
+ urllib3==1.26.18
+ uvicorn==0.24.0.post1
+ uvloop==0.19.0
+ watchfiles==0.21.0
+ websocket-client==1.6.4
+ websockets==11.0.3
+ wrapt==1.16.0
+ yarl==1.9.2
+ zipp==3.17.0
test_local_translator.py ADDED
@@ -0,0 +1,3 @@
+ from translator import LocalTranslator
+ process = LocalTranslator.language_translator()
+ process.translate()
translator.py ADDED
@@ -0,0 +1,159 @@
+ import json
+ import time
+ from LanguageTranslator.utils.argument import ArgumentParser
+ from LanguageTranslator.model.openai_model import OpenAIModel
+ from fastapi import HTTPException
+ 
+ 
+ class LocalTranslator:
+     def __init__(self, open_ai, inputs_data):
+         # Initialize the LocalTranslator with the OpenAI model and input data
+         self.open_ai = open_ai
+         self.inputs_data = inputs_data
+ 
+     def translate(self):
+         # Translates the provided text into the destination language
+         translations = []
+         result_response = {
+             'text': '',
+             'language_translation': ''
+         }
+ 
+         if 'text' not in self.inputs_data or 'dest_language' not in self.inputs_data:
+             print("Required fields 'text' and 'dest_language' are missing in input data.")
+             return translations, ''
+ 
+         text = self.inputs_data['text']
+         dest_language = self.inputs_data['dest_language']
+ 
+         if not isinstance(dest_language, str):
+             print("Destination language is not a single text.")
+             return translations, dest_language
+ 
+         if isinstance(text, str):
+             text = [text]
+ 
+         if not isinstance(text, list):
+             print("Input data is not a text or a list of text.")
+             return translations, dest_language
+ 
+         start_time = time.time()
+         for t in text:
+             prompt = self.open_ai.translate_prompt(t, dest_language)
+             translation, _ = self.open_ai.make_translation_request(prompt)
+             translations.append(translation)
+         end_time = time.time()
+ 
+         execution_time = end_time - start_time
+         result_response['text'] = translations if len(translations) > 1 else translations[0]
+         result_response['language_translation'] = dest_language
+         print(f"Translation completed in {execution_time:.2f} seconds 🎉🎉🎉\nTranslated: {result_response}")
+         return result_response
+ 
+     @classmethod
+     def load_parser(cls):
+         # Loads the parser object for argument parsing
+         argument_parser = ArgumentParser()
+         args = argument_parser.parse_arguments()
+         return args
+ 
+     @classmethod
+     def language_translator(cls):
+         # Builds the language translator using the provided arguments
+         args = cls.load_parser()
+         if args.model_type == "OpenAIModel":
+             if args.openai_model and args.openai_api_key:
+                 model_name = args.openai_model
+                 api_key = args.openai_api_key
+             else:
+                 raise ValueError("Invalid OpenAI model or API key")
+             model = OpenAIModel(model_name=model_name, api_key=api_key)
+         else:
+             raise ValueError("Invalid model type specified.")
+ 
+         if args.json:
+             with open(args.json, 'r') as j:
+                 input_data = json.load(j)
+         else:
+             input_data = {
+                 "text": args.text,
+                 "dest_language": args.dest_language
+             }
+ 
+         return cls(open_ai=model, inputs_data=input_data)
+ 
+ 
+ class ServerTranslator:
+     def __init__(self, open_ai, inputs_data):
+         # Initialize the ServerTranslator with the OpenAI model and input data
+         self.open_ai = open_ai
+         self.inputs_data = inputs_data
+ 
+     def translate(self):
+         # Translates the given text into the destination language
+         translations = []
+         result_response = {
+             'text': '',
+             'language_translation': ''
+         }
+ 
+         if 'text' not in self.inputs_data or 'dest_language' not in self.inputs_data:
+             raise HTTPException(status_code=400,
+                                 detail="Required fields 'text' and 'dest_language' are missing in input data.")
+ 
+         text = self.inputs_data['text']
+         dest_language = self.inputs_data['dest_language']
+ 
+         if not isinstance(dest_language, str):
+             raise HTTPException(status_code=400, detail="Destination language is not a single text.")
+ 
+         if isinstance(text, str):
+             text = text.split(',')
+             text = [text]
+ 
+         if not isinstance(text, list):
+             raise HTTPException(status_code=400, detail="Input data is not a text or a list of text.")
+ 
+         start_time = time.time()
+         for t in text:
+             prompt = self.open_ai.translate_prompt(t, dest_language)
+             translation, _ = self.open_ai.make_translation_request(prompt)
+             translations.append(translation)
+         end_time = time.time()
+         execution_time = end_time - start_time
+         result_response['text'] = translations if len(translations) > 1 else translations[0]
+         result_response['language_translation'] = dest_language
+         print(f"Translation completed in {execution_time:.2f} seconds 🎉🎉🎉\nTranslated: {result_response}")
+         return result_response
+ 
+     @classmethod
+     def language_translator(cls, inputs_data=None, text=None, dest_language=None,
+                             model_type='OpenAIModel', openai_model='gpt-3.5-turbo',
+                             openai_api_key='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'):
+         # Builds the language translator using the provided arguments
+         if model_type == "OpenAIModel":
+             if openai_model and openai_api_key:
+                 model_name = openai_model
+                 api_key = openai_api_key
+             else:
+                 raise HTTPException(status_code=400, detail="Invalid OpenAI model or API key")
+             model = OpenAIModel(model_name=model_name, api_key=api_key)
+         else:
+             raise HTTPException(status_code=400, detail="Invalid model type specified.")
+ 
+         if inputs_data:
+             input_data = {
+                 "text": inputs_data['text'].split(','),
+                 "dest_language": inputs_data['dest_language']
+             }
+         else:
+             input_data = {
+                 "text": text,
+                 "dest_language": dest_language
+             }
+         return cls(open_ai=model, inputs_data=input_data)
+ 
+ 
+ # if __name__ == "__main__":
+ #     process = LocalTranslator.language_translator()
+ #     process.translate()