miracle01 committed
Commit 835e738 · 1 Parent(s): 3898bce

Update app.py

Files changed (1)
  1. app.py +6 -52
app.py CHANGED
@@ -158,8 +158,7 @@ def main():
     with st.sidebar:
         st.image(side_img, width=300)
         st.sidebar.subheader("Menu")
-        website_menu = st.sidebar.selectbox("Menu", ("Emotion Recognition", "Project description", "Our team",
-                                                      "Leave feedback", "Relax"))
+        website_menu = st.sidebar.selectbox("Menu", ("Emotion Recognition", "Project description", "Relax"))
     st.set_option('deprecation.showfileUploaderEncoding', False)
 
     if website_menu == "Emotion Recognition":
@@ -417,8 +416,7 @@ def main():
         st.markdown(txt, unsafe_allow_html=True)
 
         st.subheader("Theory")
-        link = '[Theory behind - Medium article]' \
-               '(https://talbaram3192.medium.com/classifying-emotions-using-audio-recordings-and-python-434e748a95eb)'
+        link = '[Theory behind - ]'
         st.markdown(link + ":clap::clap::clap: Tal!", unsafe_allow_html=True)
         with st.expander("See Wikipedia definition"):
             components.iframe("https://en.wikipedia.org/wiki/Emotion_recognition",
@@ -426,13 +424,15 @@ def main():
 
         st.subheader("Dataset")
         txt = """
-            This web-application is a part of the final **Data Mining** project for **ITC Fellow Program 2020**.
+            This machine learning web application project is submitted in partial fulfillment of the requirements for the Higher National Diploma (HND) in Computer Science at **The Federal College of Animal Health and Production Technology** (**FCAHPTIB, 2023**).
 
             Datasets used in this project
             * Crowd-sourced Emotional Mutimodal Actors Dataset (**Crema-D**)
             * Ryerson Audio-Visual Database of Emotional Speech and Song (**Ravdess**)
             * Surrey Audio-Visual Expressed Emotion (**Savee**)
-            * Toronto emotional speech set (**Tess**)
+            * Toronto emotional speech set (**Tess**)
+
+            The above datasets were used to train the model in this software before deployment.
             """
         st.markdown(txt, unsafe_allow_html=True)
 
@@ -440,52 +440,6 @@ def main():
         fig = px.violin(df, y="source", x="emotion4", color="actors", box=True, points="all", hover_data=df.columns)
         st.plotly_chart(fig, use_container_width=True)
 
-        st.subheader("FYI")
-        st.write("Since we are currently using a free tier instance of AWS, "
-                 "we disabled mel-spec and ensemble models.\n\n"
-                 "If you want to try them we recommend to clone our GitHub repo")
-        st.code("git clone https://github.com/CyberMaryVer/speech-emotion-webapp.git", language='bash')
-
-        st.write("After that, just uncomment the relevant sections in the app.py file "
-                 "to use these models:")
-
-    elif website_menu == "Our team":
-        st.subheader("Our team")
-        st.balloons()
-        col1, col2 = st.columns([3, 2])
-        with col1:
-            st.info("[email protected]")
-            st.info("[email protected]")
-            st.info("[email protected]")
-        with col2:
-            liimg = Image.open("images/LI-Logo.png")
-            st.image(liimg)
-            st.markdown(f""":speech_balloon: [Maria Startseva](https://www.linkedin.com/in/maria-startseva)""",
-                        unsafe_allow_html=True)
-            st.markdown(f""":speech_balloon: [Tal Baram](https://www.linkedin.com/in/tal-baram-b00b66180)""",
-                        unsafe_allow_html=True)
-            st.markdown(f""":speech_balloon: [Asher Holder](https://www.linkedin.com/in/asher-holder-526a05173)""",
-                        unsafe_allow_html=True)
-
-    elif website_menu == "Leave feedback":
-        st.subheader("Leave feedback")
-        user_input = st.text_area("Your feedback is greatly appreciated")
-        user_name = st.selectbox("Choose your personality", ["checker1", "checker2", "checker3", "checker4"])
-
-        if st.button("Submit"):
-            st.success(f"Message\n\"\"\"{user_input}\"\"\"\nwas sent")
-
-            if user_input == "log123456" and user_name == "checker4":
-                with open("log0.txt", "r", encoding="utf8") as f:
-                    st.text(f.read())
-            elif user_input == "feedback123456" and user_name == "checker4":
-                with open("log.txt", "r", encoding="utf8") as f:
-                    st.text(f.read())
-            else:
-                log_file(user_name + " " + user_input)
-                thankimg = Image.open("images/sticky.png")
-                st.image(thankimg)
-
     else:
         import requests
         import json
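For orientation, the navigation that remains after this commit can be sketched as a minimal, self-contained Streamlit script. The page bodies below are hypothetical placeholders for illustration only; they are not the functions in app.py.

# Sketch of the trimmed sidebar menu; placeholder page bodies, not app.py code.
import streamlit as st

def main():
    with st.sidebar:
        st.sidebar.subheader("Menu")
        # The commit reduces the menu from five entries to these three.
        website_menu = st.sidebar.selectbox(
            "Menu", ("Emotion Recognition", "Project description", "Relax"))

    if website_menu == "Emotion Recognition":
        st.write("Audio upload and emotion prediction would go here.")
    elif website_menu == "Project description":
        st.write("Theory links and the dataset overview would go here.")
    else:  # "Relax"
        st.write("Relax page.")

if __name__ == "__main__":
    main()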
 
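The Dataset section keeps the px.violin chart from the original code. As a reference, a runnable sketch of that call is shown below with a fabricated dataframe: the column names (source, emotion4, actors) come from the diff, while the values and dtypes are invented and may not match the project's metadata.

# Fabricated stand-in dataframe; only the column names are taken from app.py.
import pandas as pd
import plotly.express as px
import streamlit as st

df = pd.DataFrame({
    "source": ["Crema-D"] * 4 + ["Ravdess"] * 4,
    "emotion4": [0.2, 0.5, 0.7, 0.9, 0.1, 0.4, 0.6, 0.8],  # real column dtype may differ
    "actors": ["actor_1", "actor_2"] * 4,
})

# Same call shape as in app.py: violins per dataset source, split by actor group.
fig = px.violin(df, y="source", x="emotion4", color="actors",
                box=True, points="all", hover_data=df.columns)
st.plotly_chart(fig, use_container_width=True)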