AnkitS1997 committed
Commit e530b85 · Parent(s): d357382

Updated the Streamlit app: caption generation now runs in-process with the fine-tuned BLIP-2 model instead of calling the FastAPI backend, and the backend launch is commented out in start.sh.

.ipynb_checkpoints/start-checkpoint.sh CHANGED
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # Start FastAPI
-uvicorn app:app --host 0.0.0.0 --port 8502 &
+# uvicorn app:app --host 0.0.0.0 --port 8502 &
 
 # Start Streamlit
 streamlit run streamlit_app.py \
.ipynb_checkpoints/streamlit_app-checkpoint.py CHANGED
@@ -6,16 +6,16 @@ import torch
 import io
 
 
-# @st.cache_resource
-# def load_model():
-#     model = Blip2ForConditionalGeneration.from_pretrained("ybelkada/blip2-opt-2.7b-fp16-sharded")
-#     model.load_adapter('blip-cpu-model')
-#     processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
-#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-#     model.to(device)
-#     return model, processor
+@st.cache_resource
+def load_model():
+    model = Blip2ForConditionalGeneration.from_pretrained("ybelkada/blip2-opt-2.7b-fp16-sharded")
+    model.load_adapter('blip-cpu-model')
+    processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model.to(device)
+    return model, processor
 
-# model, processor = load_model()
+model, processor = load_model()
 
 st.title("Image Captioning with Fine-Tuned BLiPv2 Model")
 
@@ -25,16 +25,16 @@ if uploaded_file is not None:
     image = Image.open(uploaded_file)
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
-    files = {"file": uploaded_file.getvalue()}
-    print("Sending API request")
-    response = requests.post("http://0.0.0.0:8502/generate-caption/", files=files)
-    caption = response.json().get("caption")
+    # files = {"file": uploaded_file.getvalue()}
+    # print("Sending API request")
+    # response = requests.post("http://0.0.0.0:8502/generate-caption/", files=files)
+    # caption = response.json().get("caption")
 
-    # inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
+    inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
 
-    # with torch.no_grad():
-    #     caption_ids = model.generate(**inputs, max_length=128)
-    #     caption = processor.decode(caption_ids[0], skip_special_tokens=True)
+    with torch.no_grad():
+        caption_ids = model.generate(**inputs, max_length=128)
+        caption = processor.decode(caption_ids[0], skip_special_tokens=True)
 
     st.write("Generated Caption:")
     st.write(f"**{caption}**")
start.sh CHANGED
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # Start FastAPI
-uvicorn app:app --host 0.0.0.0 --port 8502 &
+# uvicorn app:app --host 0.0.0.0 --port 8502 &
 
 # Start Streamlit
 streamlit run streamlit_app.py \
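
For context: app.py itself is not touched by this commit, so the backend that `uvicorn app:app` used to launch is not shown here. The sketch below is only a guess at a minimal FastAPI endpoint compatible with the now-commented-out client calls in streamlit_app.py; the route and port are taken from this diff, while the handler body, model loading, and names are assumptions.

# Hypothetical app.py (not part of this commit).
# Route "/generate-caption/" and port 8502 come from the diff; everything else is assumed.
import io

import torch
from fastapi import FastAPI, File, UploadFile
from PIL import Image
from transformers import AutoProcessor, Blip2ForConditionalGeneration

app = FastAPI()

processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("ybelkada/blip2-opt-2.7b-fp16-sharded")
model.load_adapter("blip-cpu-model")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


@app.post("/generate-caption/")
async def generate_caption(file: UploadFile = File(...)):
    # Read the uploaded bytes and run a single BLIP-2 generation pass.
    image = Image.open(io.BytesIO(await file.read())).convert("RGB")
    inputs = processor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        caption_ids = model.generate(**inputs, max_length=128)
    caption = processor.decode(caption_ids[0], skip_special_tokens=True)
    return {"caption": caption}

Such a backend would be started with the line commented out above, `uvicorn app:app --host 0.0.0.0 --port 8502 &`, if the API route is ever re-enabled.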
streamlit_app.py CHANGED
@@ -6,16 +6,16 @@ import torch
 import io
 
 
-# @st.cache_resource
-# def load_model():
-#     model = Blip2ForConditionalGeneration.from_pretrained("ybelkada/blip2-opt-2.7b-fp16-sharded")
-#     model.load_adapter('blip-cpu-model')
-#     processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
-#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-#     model.to(device)
-#     return model, processor
+@st.cache_resource
+def load_model():
+    model = Blip2ForConditionalGeneration.from_pretrained("ybelkada/blip2-opt-2.7b-fp16-sharded")
+    model.load_adapter('blip-cpu-model')
+    processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model.to(device)
+    return model, processor
 
-# model, processor = load_model()
+model, processor = load_model()
 
 st.title("Image Captioning with Fine-Tuned BLiPv2 Model")
 
@@ -25,16 +25,16 @@ if uploaded_file is not None:
     image = Image.open(uploaded_file)
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
-    files = {"file": uploaded_file.getvalue()}
-    print("Sending API request")
-    response = requests.post("http://0.0.0.0:8502/generate-caption/", files=files)
-    caption = response.json().get("caption")
+    # files = {"file": uploaded_file.getvalue()}
+    # print("Sending API request")
+    # response = requests.post("http://0.0.0.0:8502/generate-caption/", files=files)
+    # caption = response.json().get("caption")
 
-    # inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
+    inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
 
-    # with torch.no_grad():
-    #     caption_ids = model.generate(**inputs, max_length=128)
-    #     caption = processor.decode(caption_ids[0], skip_special_tokens=True)
+    with torch.no_grad():
+        caption_ids = model.generate(**inputs, max_length=128)
+        caption = processor.decode(caption_ids[0], skip_special_tokens=True)
 
     st.write("Generated Caption:")
     st.write(f"**{caption}**")
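
One caveat in the new in-process path (an observation, not something this commit addresses): the inputs are cast to torch.float16 while the model is moved to `device` without an explicit dtype, so on a CPU-only machine the half-precision inputs may not match the model's weights. A defensive variant, purely as a sketch, would derive the dtype from the device:

# Sketch only (not in this commit): pick the input dtype from the device
# instead of always casting to float16, so the same code also runs on CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32

inputs = processor(images=image, return_tensors="pt").to(device, dtype)
with torch.no_grad():
    caption_ids = model.generate(**inputs, max_length=128)
caption = processor.decode(caption_ids[0], skip_special_tokens=True)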