import logging
from pathlib import Path

import gradio as gr
from datasets import Dataset
from gradio_log import Log
from huggingface_hub import DatasetCard
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import MetadataMode
from tqdm.auto import tqdm

# Write INFO-level logs to a file so the gradio_log Log component can tail it.
log_file = "logs.txt"
Path(log_file).touch(exist_ok=True)
logging.basicConfig(filename=log_file, level=logging.INFO)


def load_corpus(files, chunk_size=256, chunk_overlap=0, verbose=True):
    """Load the uploaded files and split them into sentence chunks."""
    if verbose:
        gr.Info("Loading files...")
    reader = SimpleDirectoryReader(input_files=files)
    docs = reader.load_data()
    if verbose:
        print(f"Loaded {len(docs)} docs")

    parser = SentenceSplitter.from_defaults(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap
    )
    nodes = parser.get_nodes_from_documents(docs, show_progress=verbose)
    if verbose:
        print(f"Parsed {len(nodes)} nodes")

    docs = {
        node.node_id: node.get_content(metadata_mode=MetadataMode.NONE)
        for node in tqdm(nodes)
    }
    # remove empty docs
    return {k: v for k, v in docs.items() if v}


def upload_file(
    files,
    chunk_size: int = 256,
    chunk_overlap: int = 0,
    hub_id: str = None,
    private: bool = False,
    oauth_token: gr.OAuthToken = None,
):
    print("loading files")
    file_paths = [file.name for file in files]
    print("parsing into sentences")
    corpus = load_corpus(file_paths, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    print("Creating dataset")
    dataset = Dataset.from_dict(
        {"ids": list(corpus.keys()), "texts": list(corpus.values())}
    )
    message = f"Dataset created with:\n- {len(dataset)} rows"
    if hub_id:
        if oauth_token is not None:
            gr.Info("Uploading to Hugging Face Hub")
            dataset.push_to_hub(hub_id, token=oauth_token.token, private=private)
            update_dataset_card(hub_id, oauth_token.token, chunk_size, chunk_overlap)
            message += f"\n\nUploaded to [{hub_id}](https://huggingface.co/datasets/{hub_id})"
        else:
            raise gr.Error("Please login to Hugging Face Hub to push to hub")
    return dataset.to_pandas(), message


def update_dataset_card(
    hub_id,
    token,
    chunk_size,
    chunk_overlap,
):
    card = DatasetCard.load(hub_id, token=token)
    if not card.text:
        # add template description to card text
        card.text += f"""This dataset was created using [Corpus Creator](https://huggingface.co/spaces/davanstrien/corpus-creator). This dataset was created by parsing a corpus of text files into chunks of sentences using Llama Index. This processing was done with a chunk size of {chunk_size} and a chunk overlap of {chunk_overlap}."""
    tags = card.data.get("tags", [])
    tags.append("corpus-creator")
    card.data["tags"] = tags
    card.push_to_hub(hub_id, token=token)
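
# A minimal usage sketch (illustrative only, not executed by the app): load_corpus
# can also be called directly from a script to chunk local files without the Gradio
# UI. The file path below is a hypothetical placeholder, not part of this repo.
#
#     chunks = load_corpus(["my_document.txt"], chunk_size=256, chunk_overlap=16, verbose=False)
#     print(f"Produced {len(chunks)} text chunks")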

description = """
Corpus Creator is a tool designed to help you easily convert a collection of text files into a dataset suitable for various natural language processing (NLP) tasks.

In particular, the app focuses on splitting texts into chunks of a specified size and overlap. This can be useful for preparing data for synthetic data generation pipelines or annotation tasks.

See an [example dataset](https://huggingface.co/datasets/davanstrien/MOH-Bethnal-Green) created using this tool, starting from a collection of plain text files.

The resulting text chunks are stored in a dataset that can be previewed and uploaded to the Hugging Face Hub for easy sharing and access by the community.

The chunking is done using `Llama-index`'s [`SentenceSplitter`](https://docs.llamaindex.ai/en/stable/module_guides/loading/node_parsers/modules/?h=sentencesplitter#sentencesplitter) class.

### Usage:

- Login: Start by logging in to your Hugging Face account using the provided login button.
- Set Parameters: Customize the chunk size and overlap according to your requirements.
- Upload Files: Use the upload button to load file(s) for processing.
- Preview Dataset: View the created dataset in a dataframe format before uploading it to the Hugging Face Hub.
- Upload to Hub: Optionally, specify the Hub ID and choose whether to make the dataset private before pushing it to the Hugging Face Hub."""


with gr.Blocks() as demo:
    gr.HTML(
        """