Hachiru committed on
Commit
a776bf7
·
1 Parent(s): 99ad937

Update from GitHub Actions

Browse files
Files changed (18) hide show
  1. README.md +1 -12
  2. hf_space/.github/workflows/main.yml +1 -1
  3. hf_space/app.py +10 -9
  4. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +74 -11
  5. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +2 -2
  6. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +1 -1
  7. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.github/workflows/main.yml +9 -1
  8. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +2 -2
  9. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +2 -2
  10. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +5 -5
  11. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.github/workflows/main.yml +26 -0
  12. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +18 -0
  13. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.gitattributes +35 -0
  14. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md +12 -0
  15. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt +4 -0
  16. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt +2 -1
  17. hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt +2 -0
  18. hf_space/requirements.txt +0 -2
README.md CHANGED
@@ -1,12 +1 @@
1
- ---
2
- title: Chigas
3
- emoji: 🐠
4
- colorFrom: pink
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 5.20.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ https://huggingface.co/spaces/Hachiru/chigas
 
 
 
 
 
 
 
 
 
 
 
hf_space/.github/workflows/main.yml CHANGED
@@ -18,7 +18,7 @@ jobs:
18
 
19
  - name: Install dependencies
20
  run: |
21
- pip install huggingface_hub
22
 
23
  - name: Log in to Hugging Face
24
  run: |
 
18
 
19
  - name: Install dependencies
20
  run: |
21
+ pip install huggingface_hub
22
 
23
  - name: Log in to Hugging Face
24
  run: |
hf_space/app.py CHANGED
@@ -1,16 +1,17 @@
1
  import torch
2
- from transformers import GPT2LMHeadModel, GPT2Tokenizer
3
  import gradio as gr
4
- import scispacy
5
- import spacy
6
 
7
  # Load GPT-2 model and tokenizer
8
  MODEL_NAME = "gpt2"
9
  tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
10
  model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
11
 
12
- # Load SciSpacy model for medical NLP
13
- nlp = spacy.load("en_core_sci_md")
 
 
 
14
 
15
  # Conversation memory
16
  conversation_history = []
@@ -44,11 +45,11 @@ def generate_text(prompt, max_length=100, temperature=0.7, top_k=50, top_p=0.9,
44
  for word in stopwords.split(","):
45
  text = text.replace(word.strip(), "")
46
 
47
- # Extract medical entities
48
  if extract_entities:
49
- doc = nlp(text)
50
- entities = [(ent.text, ent.label_) for ent in doc.ents]
51
- text += "\n\nExtracted Medical Entities:\n" + "\n".join([f"{e[0]} ({e[1]})" for e in entities])
52
 
53
  responses.append(text)
54
 
 
1
  import torch
2
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer, AutoModelForTokenClassification, AutoTokenizer, pipeline
3
  import gradio as gr
 
 
4
 
5
  # Load GPT-2 model and tokenizer
6
  MODEL_NAME = "gpt2"
7
  tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
8
  model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
9
 
10
+ # Load BioBERT for medical NLP without requiring pip install
11
+ BIOBERT_MODEL = "dmis-lab/biobert-base-cased-v1.1"
12
+ biobert_tokenizer = AutoTokenizer.from_pretrained(BIOBERT_MODEL)
13
+ biobert_model = AutoModelForTokenClassification.from_pretrained(BIOBERT_MODEL)
14
+ nlp_pipeline = pipeline("ner", model=biobert_model, tokenizer=biobert_tokenizer)
15
 
16
  # Conversation memory
17
  conversation_history = []
 
45
  for word in stopwords.split(","):
46
  text = text.replace(word.strip(), "")
47
 
48
+ # Extract medical entities using BioBERT
49
  if extract_entities:
50
+ entities = nlp_pipeline(text)
51
+ extracted_entities = set([entity["word"] for entity in entities])
52
+ text += "\n\nExtracted Medical Entities:\n" + "\n".join(extracted_entities)
53
 
54
  responses.append(text)
55
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,18 +1,81 @@
 
 
1
  import gradio as gr
2
- from transformers import pipeline
 
3
 
4
- generator = pipeline('text-generation', model='gpt2')
 
 
 
5
 
6
- def generate_text(prompt, max_length):
7
- generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
8
- return generated_text[0]['generated_text']
9
 
10
- iface = gr.Interface(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  fn=generate_text,
12
- inputs=[gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"), gr.Slider(minimum=10, maximum=1000, value=50, label="Độ dài văn bản")],
13
- outputs="text",
14
- title="Ứng dụng Generative AI",
15
- description="Tạo văn bản",
 
 
 
 
 
 
 
 
 
 
16
  )
17
 
18
- iface.launch(share=True) # share=True để triển khai lên Spaces
 
 
1
+ import torch
2
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
3
  import gradio as gr
4
+ import scispacy
5
+ import spacy
6
 
7
+ # Load GPT-2 model and tokenizer
8
+ MODEL_NAME = "gpt2"
9
+ tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
10
+ model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
11
 
12
+ # Load SciSpacy model for medical NLP
13
+ nlp = spacy.load("en_core_sci_md")
 
14
 
15
+ # Conversation memory
16
+ conversation_history = []
17
+
18
+ # Function to generate text with medical NLP support
19
+ def generate_text(prompt, max_length=100, temperature=0.7, top_k=50, top_p=0.9, clean_output=False, stopwords="", num_responses=1, extract_entities=False):
20
+ global conversation_history
21
+
22
+ # Combine conversation history with new prompt
23
+ full_prompt = "\n".join(conversation_history + [prompt])
24
+ inputs = tokenizer(full_prompt, return_tensors="pt")
25
+
26
+ responses = []
27
+ for _ in range(num_responses):
28
+ with torch.no_grad():
29
+ output = model.generate(
30
+ **inputs,
31
+ max_length=max_length,
32
+ temperature=temperature,
33
+ top_k=top_k,
34
+ top_p=top_p,
35
+ do_sample=True
36
+ )
37
+ text = tokenizer.decode(output[0], skip_special_tokens=True)
38
+
39
+ # Text cleaning
40
+ if clean_output:
41
+ text = text.replace("\n", " ").strip()
42
+
43
+ # Stopword filtering
44
+ for word in stopwords.split(","):
45
+ text = text.replace(word.strip(), "")
46
+
47
+ # Extract medical entities
48
+ if extract_entities:
49
+ doc = nlp(text)
50
+ entities = [(ent.text, ent.label_) for ent in doc.ents]
51
+ text += "\n\nExtracted Medical Entities:\n" + "\n".join([f"{e[0]} ({e[1]})" for e in entities])
52
+
53
+ responses.append(text)
54
+
55
+ # Update conversation history
56
+ conversation_history.append(prompt)
57
+ conversation_history.append(responses[0]) # Store only the first response
58
+
59
+ return "\n\n".join(responses)
60
+
61
+ # Gradio Interface
62
+ demo = gr.Interface(
63
  fn=generate_text,
64
+ inputs=[
65
+ gr.Textbox(label="Medical Query"),
66
+ gr.Slider(50, 500, step=10, label="Max Length"),
67
+ gr.Slider(0.1, 1.5, step=0.1, label="Temperature"),
68
+ gr.Slider(0, 100, step=5, label="Top-K"),
69
+ gr.Slider(0.0, 1.0, step=0.1, label="Top-P"),
70
+ gr.Checkbox(label="Clean Output"),
71
+ gr.Textbox(label="Stopwords (comma-separated)"),
72
+ gr.Slider(1, 5, step=1, label="Number of Responses"),
73
+ gr.Checkbox(label="Extract Medical Entities")
74
+ ],
75
+ outputs=gr.Textbox(label="Generated Medical Text"),
76
+ title="Medical AI Assistant",
77
+ description="Enter a medical-related prompt and adjust parameters to generate AI-assisted text. Supports entity recognition for medical terms.",
78
  )
79
 
80
+ # Launch the app
81
+ demo.launch()
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- generator = pipeline('text-generation', model='deepseek-ai/DeepSeek-V2-Lite')
5
 
6
  def generate_text(prompt, max_length):
7
  generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
@@ -12,7 +12,7 @@ iface = gr.Interface(
12
  inputs=[gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"), gr.Slider(minimum=10, maximum=1000, value=50, label="Độ dài văn bản")],
13
  outputs="text",
14
  title="Ứng dụng Generative AI",
15
- description="Tạo văn bản với Llama3",
16
  )
17
 
18
  iface.launch(share=True) # share=True để triển khai lên Spaces
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ generator = pipeline('text-generation', model='gpt2')
5
 
6
  def generate_text(prompt, max_length):
7
  generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
 
12
  inputs=[gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"), gr.Slider(minimum=10, maximum=1000, value=50, label="Độ dài văn bản")],
13
  outputs="text",
14
  title="Ứng dụng Generative AI",
15
+ description="Tạo văn bản",
16
  )
17
 
18
  iface.launch(share=True) # share=True để triển khai lên Spaces
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- generator = pipeline('text-generation', model='meta-llama/Meta-Llama-3-8B')
5
 
6
  def generate_text(prompt, max_length):
7
  generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ generator = pipeline('text-generation', model='deepseek-ai/DeepSeek-V2-Lite')
5
 
6
  def generate_text(prompt, max_length):
7
  generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.github/workflows/main.yml CHANGED
@@ -15,7 +15,15 @@ jobs:
15
  run: |
16
  git config --global user.email "[email protected]"
17
  git config --global user.name "Hachiru"
18
-
 
 
 
 
 
 
 
 
19
  - name: Push to Hugging Face Spaces
20
  run: |
21
  git clone https://huggingface.co/spaces/${{ secrets.HF_USERNAME }}/${{ github.event.repository.name }} hf_space
 
15
  run: |
16
  git config --global user.email "[email protected]"
17
  git config --global user.name "Hachiru"
18
+
19
+ - name: Install dependencies
20
+ run: |
21
+ pip install huggingface_hub
22
+
23
+ - name: Log in to Hugging Face
24
+ run: |
25
+ huggingface-cli login --token ${{ secrets.HF_TOKEN }}
26
+
27
  - name: Push to Hugging Face Spaces
28
  run: |
29
  git clone https://huggingface.co/spaces/${{ secrets.HF_USERNAME }}/${{ github.event.repository.name }} hf_space
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- generator = pipeline('text-generation', model='gpt2')
5
 
6
  def generate_text(prompt, max_length):
7
  generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
@@ -12,7 +12,7 @@ iface = gr.Interface(
12
  inputs=[gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"), gr.Slider(minimum=10, maximum=1000, value=50, label="Độ dài văn bản")],
13
  outputs="text",
14
  title="Ứng dụng Generative AI",
15
- description="Tạo văn bản với GPT-2",
16
  )
17
 
18
  iface.launch(share=True) # share=True để triển khai lên Spaces
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ generator = pipeline('text-generation', model='meta-llama/Meta-Llama-3-8B')
5
 
6
  def generate_text(prompt, max_length):
7
  generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
 
12
  inputs=[gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"), gr.Slider(minimum=10, maximum=1000, value=50, label="Độ dài văn bản")],
13
  outputs="text",
14
  title="Ứng dụng Generative AI",
15
+ description="Tạo văn bản với Llama3",
16
  )
17
 
18
  iface.launch(share=True) # share=True để triển khai lên Spaces
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- generator = pipeline('text-generation', model='meta-llama/Meta-Llama-3-8B')
5
 
6
  def generate_text(prompt, max_length):
7
  generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
@@ -9,7 +9,7 @@ def generate_text(prompt, max_length):
9
 
10
  iface = gr.Interface(
11
  fn=generate_text,
12
- inputs=[gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"), gr.Slider(minimum=10, maximum=1000, value=50, label="Max Length")],
13
  outputs="text",
14
  title="Ứng dụng Generative AI",
15
  description="Tạo văn bản với GPT-2",
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ generator = pipeline('text-generation', model='gpt2')
5
 
6
  def generate_text(prompt, max_length):
7
  generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
 
9
 
10
  iface = gr.Interface(
11
  fn=generate_text,
12
+ inputs=[gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"), gr.Slider(minimum=10, maximum=1000, value=50, label="Độ dài văn bản")],
13
  outputs="text",
14
  title="Ứng dụng Generative AI",
15
  description="Tạo văn bản với GPT-2",
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,18 +1,18 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- generator = pipeline('text-generation', model='gpt2')
5
 
6
- def generate_text(prompt):
7
- generated_text = generator(prompt, max_length=50, num_return_sequences=1)
8
  return generated_text[0]['generated_text']
9
 
10
  iface = gr.Interface(
11
  fn=generate_text,
12
- inputs=gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"),
13
  outputs="text",
14
  title="Ứng dụng Generative AI",
15
  description="Tạo văn bản với GPT-2",
16
  )
17
 
18
- iface.launch(share=True) # share=True để triển khai lên Spaces
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ generator = pipeline('text-generation', model='meta-llama/Meta-Llama-3-8B')
5
 
6
+ def generate_text(prompt, max_length):
7
+ generated_text = generator(prompt, max_length=max_length, num_return_sequences=1)
8
  return generated_text[0]['generated_text']
9
 
10
  iface = gr.Interface(
11
  fn=generate_text,
12
+ inputs=[gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"), gr.Slider(minimum=10, maximum=1000, value=50, label="Max Length")],
13
  outputs="text",
14
  title="Ứng dụng Generative AI",
15
  description="Tạo văn bản với GPT-2",
16
  )
17
 
18
+ iface.launch(share=True) # share=True để triển khai lên Spaces
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.github/workflows/main.yml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Deploy to Hugging Face Spaces
2
+
3
+ on:
4
+ push:
5
+ branches: [ main ]
6
+
7
+ jobs:
8
+ deploy:
9
+ runs-on: ubuntu-latest
10
+ steps:
11
+ - name: Checkout repository
12
+ uses: actions/checkout@v3
13
+
14
+ - name: Setup Git
15
+ run: |
16
+ git config --global user.email "[email protected]"
17
+ git config --global user.name "Hachiru"
18
+
19
+ - name: Push to Hugging Face Spaces
20
+ run: |
21
+ git clone https://huggingface.co/spaces/${{ secrets.HF_USERNAME }}/${{ github.event.repository.name }} hf_space
22
+ rsync -av --exclude='.git' ./ hf_space/
23
+ cd hf_space
24
+ git add .
25
+ git commit -m "Update from GitHub Actions"
26
+ git push https://${{ secrets.HF_USERNAME }}:${{ secrets.HF_TOKEN }}@huggingface.co/spaces/${{ secrets.HF_USERNAME }}/${{ github.event.repository.name }} main
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+
4
+ generator = pipeline('text-generation', model='gpt2')
5
+
6
+ def generate_text(prompt):
7
+ generated_text = generator(prompt, max_length=50, num_return_sequences=1)
8
+ return generated_text[0]['generated_text']
9
+
10
+ iface = gr.Interface(
11
+ fn=generate_text,
12
+ inputs=gr.Textbox(lines=2, placeholder="Nhập văn bản đầu vào"),
13
+ outputs="text",
14
+ title="Ứng dụng Generative AI",
15
+ description="Tạo văn bản với GPT-2",
16
+ )
17
+
18
+ iface.launch(share=True) # share=True để triển khai lên Spaces
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Chigas
3
+ emoji: 🐠
4
+ colorFrom: pink
5
+ colorTo: indigo
6
+ sdk: gradio
7
+ sdk_version: 5.20.1
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ pytorch
2
+ tensorflow
3
+ transformers
4
+ gradio
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
- pytorch
2
  tensorflow
 
3
  transformers
4
  gradio
 
1
+ torch
2
  tensorflow
3
+ tf-keras
4
  transformers
5
  gradio
hf_space/hf_space/hf_space/hf_space/hf_space/requirements.txt CHANGED
@@ -3,3 +3,5 @@ tensorflow
3
  tf-keras
4
  transformers
5
  gradio
 
 
 
3
  tf-keras
4
  transformers
5
  gradio
6
+ spacy
7
+ scispacy
hf_space/requirements.txt CHANGED
@@ -3,5 +3,3 @@ tensorflow
3
  tf-keras
4
  transformers
5
  gradio
6
- spacy
7
- scispacy
 
3
  tf-keras
4
  transformers
5
  gradio