import streamlit as st
import pandas as pd
from huggingface_hub import Repository
import os 
from pathlib import Path
import json

 
# Variables used to talk to the dataset repo

# The access token is stored as a secret key-value pair in the Space settings and is read
# from the environment; if the secret is not set, fall back to the locally cached token.
auth_token = os.environ.get("space_to_dataset") or True

DATASET_REPO_URL = 'ppsingh/annotation_data'   # path to dataset repo
DATA_FILENAME = "paralist.json"
DATA_FILE = os.path.join("data", DATA_FILENAME)

# clone the dataset repo into the local "data" folder
repo = Repository(local_dir="data", clone_from=DATASET_REPO_URL, repo_type="dataset", use_auth_token=auth_token)

# load the annotation data (a dict keyed by topic) from the cloned repo
with open(DATA_FILE, 'r', encoding="utf8") as json_file:
  paraList = json.load(json_file)
keys = paraList.keys()
#data = pd.read_csv("test.csv")
#for line in data:
st.sidebar.markdown(
    """
# Data Annotation Demo 
This app demonstrates how a Space can provide a user interface for data annotation/tagging. The data resides in a repo of type 'dataset'.
"""
)
topic = None
if keys:
  topic = st.sidebar.selectbox(
    label="Choose dataset topic to load", options=list(keys))
#  st.write(line)

if topic is not None:
  # layout placeholder (currently unused): one wide and two narrow columns,
  # presumably for the paragraph text and its annotation controls
  c1, c2, c3 = st.columns([3, 1, 1])
title = st.text_input('Movie title', 'Life of Brian')
if st.button('Submit'):
  new_row = title
  # append the submitted entry to the dataset file; 'Sentences' is kept as the field
  # name from the original sketch and is assumed to hold a list of annotated entries
  paraList.setdefault('Sentences', []).append(new_row)
  with open(DATA_FILE, 'w', encoding="utf8") as json_file:
    json.dump(paraList, json_file, ensure_ascii=False, indent=2)
  # commit the change and push it back to the dataset repo
  repo.push_to_hub(commit_message='adding new line')
  st.write('Success')

# list the files in the working directory (debug output)
directory = os.getcwd()
files = Path(directory).glob('*')
for file in files:
  st.write(file)