# annotation_dev/app.py
import streamlit as st
import pandas as pd
from huggingface_hub import Repository
import os
from pathlib import Path
import json
# Variables for talking to the dataset repo.
# The token is saved as a secret key-value pair in the Space environment and can be accessed as shown below;
# the `or True` fallback makes huggingface_hub use the locally cached token if the secret is not set.
auth_token = os.environ.get("space_to_dataset") or True
DATASET_REPO_URL = 'ppsingh/annotation_data' # path to dataset repo
DATA_FILENAME = "paralist.json"
DATA_FILE = os.path.join("data", DATA_FILENAME)
# clone the dataset repo locally into ./data
repo = Repository(local_dir="data", clone_from=DATASET_REPO_URL, repo_type="dataset", use_auth_token=auth_token)
# read the JSON data file from the cloned repo
with open(DATA_FILE, 'r', encoding="utf8") as json_file:
    paraList = json.load(json_file)
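# Assumption (not stated in the original): paraList is a dict whose top-level keys are
# topic names; the value schema under each topic depends on paralist.json itself.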
# get the outer-level (topic) keys of the JSON
keys = paraList.keys()
#data = pd.read_csv("test.csv")
# sidebar with app info and a drop-down to select one of the topic keys
st.sidebar.markdown(
"""
# Data Annotation Demo
This app demonstrates how a Space can provide the user interface for data annotation/tagging. The data resides in a repo of type 'dataset'.
"""
)
topic = None
if keys is not None:
    topic = st.sidebar.selectbox(
        label="Choose dataset topic to load", options=keys)
# st.write(line)
if topic is not None:
    c1, c2, c3 = st.columns([3, 1, 1])
    # placeholder text input (Streamlit's example defaults); the submitted value becomes the new row
    title = st.text_input('Movie title', 'Life of Brian')
    if st.button('Submit'):
        new_row = title
# data = data.append(new_row, ignore_index=True)
# st.write(data)
# st.write(os.getcwd())
# data.to_csv('test.csv', index= False)
#st.write(df)
# st.write('data/test.csv')
# iterate over files in
# that directory
#directory = os.getcwd()
#files = Path(directory).glob('*')
#for file in files:
# st.write(file)
#with open(DATA_FILE, "a") as csvfile:
# writer = csv.DictWriter(csvfile, fieldnames=["Sentences"])
# writer.writerow({'Sentences': new_row})
# repo.push_to_hub('adding new line')
# st.write('Success')
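
# A minimal sketch (not part of the original app) of how a submitted paragraph could be
# persisted, following the intent of the commented-out lines above: append the text under
# the chosen topic, rewrite the local JSON file, and push the commit to the dataset repo.
# It assumes each topic in paraList maps to a list of strings; the name save_annotation
# and the commit message are illustrative only.
def save_annotation(topic_key, text):
    # update the in-memory structure
    paraList.setdefault(topic_key, []).append(text)
    # overwrite the cloned data file
    with open(DATA_FILE, "w", encoding="utf8") as json_file:
        json.dump(paraList, json_file, ensure_ascii=False, indent=2)
    # commit and push everything under local_dir ("data") back to the dataset repo
    repo.push_to_hub(commit_message="add new annotation")
    st.write("Success")

# e.g. called from the Submit handler above: save_annotation(topic, new_row)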