from unstructured.partition.auto import partition 
from unstructured.chunking.title import chunk_by_title
from unstructured.chunking.basic import chunk_elements 
from unstructured.documents.elements import Element, Title, CompositeElement
from unstructured.staging.base import convert_to_dataframe
from typing import Type, List, Literal, Tuple

from unstructured.cleaners.core import replace_unicode_quotes, clean_non_ascii_chars, clean_ordered_bullets, group_broken_paragraphs, clean, clean_trailing_punctuation, remove_punctuation, bytes_string_to_string
import gradio as gr
import time
import pandas as pd
import re
import gzip
import pickle
from pydantic import BaseModel, Field

from tools.helper_functions import get_file_path_end, get_file_path_end_with_ext

# Creating an alias for pandas DataFrame using Type
PandasDataFrame = Type[pd.DataFrame]

# %%
# pdf partitioning strategy vars
pdf_partition_strat = "ocr_only" # ["fast", "ocr_only", "hi_res"]

# %%
# Element metadata modification vars
meta_keys_to_filter = ["file_directory", "filetype"]
element_types_to_filter = ['UncategorizedText', 'Header']

# %%
# Clean function vars

bytes_to_string=False
replace_quotes=True 
clean_non_ascii=False 
clean_ordered_list=True 
group_paragraphs=True
trailing_punctuation=False
all_punctuation=False
clean_text=True 
extra_whitespace=True 
dashes=True 
bullets=True 
lowercase=False

# %%
# Chunking vars

minimum_chunk_length = 2000
start_new_chunk_after_end_of_this_element_length = 2000
hard_max_character_length_chunks = 3000
multipage_sections=True
overlap_all=True
include_orig_elements=True

# %%
class Document(BaseModel):
    """Class for storing a piece of text and associated metadata. Implementation adapted from Langchain code: https://github.com/langchain-ai/langchain/blob/master/libs/core/langchain_core/documents/base.py"""

    page_content: str
    """String text."""
    metadata: dict = Field(default_factory=dict)
    """Arbitrary metadata about the page content (e.g., source, relationships to other
        documents, etc.).
    """
    type: Literal["Document"] = "Document"
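
# %%
# Illustrative sketch (hypothetical values, not used elsewhere in this module):
# wrapping parsed text and metadata in the Document class defined above.
# example_doc = Document(page_content="Some parsed text", metadata={"filename": "example.pdf", "page_number": 1})
# print(example_doc.type, example_doc.metadata)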

# %%
def create_title_id_dict(elements:List[Element]):
    '''
    Build lookup dictionaries mapping Title element ids to their text, and vice versa.
    '''

    # Collect the text of every Title element
    titles = [item.text for item in elements if isinstance(item, Title)]

    #### Get all elements under these titles
    chapter_ids = {}
    for element in elements:
        for chapter in titles:
            if element.text == chapter and element.category == "Title":
                chapter_ids[element._element_id] = chapter
                break

    chapter_to_id = {v: k for k, v in chapter_ids.items()}

    return chapter_ids, chapter_to_id
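
# %%
# Illustrative sketch (assumes `elements` is a list returned by partition_file below):
# chapter_ids, chapter_to_id = create_title_id_dict(elements)
# chapter_ids maps a Title element id to its text; chapter_to_id is the reverse lookup.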

# %%
def filter_elements(elements:List[Element], excluded_elements: List[str] = ['']):
    """
    Filter out elements from a list based on their categories.

    Args:
        elements: The list of elements to filter.
        excluded_elements: A list of element categories to exclude.

    Returns:
        A new list containing the filtered elements.
    """
    filtered_elements = []
    for element in elements:
        if element.category not in excluded_elements:
            filtered_elements.append(element)
    return filtered_elements

# %%
def remove_keys_from_meta(
    elements: List[Element], 
    meta_remove_keys: List[str], 
    excluded_element_types: List[str] = []
) -> List[Element]:
    '''
    Remove specified metadata keys from an Unstructured Element object
    '''

    for element in elements:
        if element.category not in excluded_element_types:
            for key in meta_remove_keys:
                try:
                    del element.metadata.__dict__[key]  # Directly modify metadata
                except KeyError:
                    print(f"Key '{key}' not found in element metadata.")

    return elements

def filter_elements_and_metadata(
    elements: List[Element],
    excluded_categories: List[str] = [],
    meta_remove_keys: List[str] = [],
) -> List[Element]:
    """
    Filters elements based on categories and removes specified metadata keys.

    Args:
        elements: The list of elements to process.
        excluded_categories: A list of element categories to exclude.
        meta_remove_keys: A list of metadata keys to remove.

    Returns:
        A new list containing the processed elements.
    """

    filtered_elements = []
    for element in elements:
        if element.category not in excluded_categories:
            for key in meta_remove_keys:
                try:
                    del element.metadata.__dict__[key]
                except KeyError:
                    # Key not present on this element's metadata; nothing to remove
                    pass
            filtered_elements.append(element)

    return filtered_elements
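
# %%
# Illustrative sketch using the module-level config vars defined above
# (element_types_to_filter, meta_keys_to_filter); `elements` is assumed to come from partition_file:
# processed = filter_elements_and_metadata(elements, excluded_categories=element_types_to_filter, meta_remove_keys=meta_keys_to_filter)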

# %%
def add_parent_title_to_meta(elements:List[Element], chapter_ids:dict, excluded_element_types:List[str]=['']) -> List[Element]:
    '''
    Add the parent title name to the metadata of each Unstructured element whose parent_id matches a known Title element.
    '''
    for element in elements:
        if element.category not in excluded_element_types:
            meta = element.metadata.to_dict()

            if "parent_id" in meta and meta["parent_id"] in chapter_ids and "title_name" not in meta:
                title_name = chapter_ids[meta["parent_id"]]
                # Directly modify the existing element metadata object
                element.metadata.title_name = title_name

    return elements


# %%
def chunk_all_elements(elements:List[Element],
                       file_name_base:str,
                       chunk_type:str = "Basic_chunking",
                       minimum_chunk_length:int=minimum_chunk_length,
                       start_new_chunk_after_end_of_this_element_length:int=start_new_chunk_after_end_of_this_element_length,
                       hard_max_character_length_chunks:int=hard_max_character_length_chunks,
                       multipage_sections:bool=multipage_sections,
                       overlap_all:bool=overlap_all,
                       include_orig_elements:bool=include_orig_elements):

    '''
    Use Unstructured.io functions to chunk an Element object by Title or across all elements.
    '''
    output_files = []
    output_summary = ""

    chapter_ids, chapter_to_id = create_title_id_dict(elements)
    
    ### Break text down into chunks

    try:

        if chunk_type == "Chunk within title":
            chunks = chunk_by_title(
                elements,
                include_orig_elements=include_orig_elements,
                combine_text_under_n_chars=minimum_chunk_length,
                new_after_n_chars=start_new_chunk_after_end_of_this_element_length,
                max_characters=hard_max_character_length_chunks,
                multipage_sections=multipage_sections,
                overlap_all=overlap_all
            )

        else:
            chunks = chunk_elements(
                elements,
                include_orig_elements=include_orig_elements,
                new_after_n_chars=start_new_chunk_after_end_of_this_element_length,
                max_characters=hard_max_character_length_chunks,
                overlap_all=overlap_all
            )
    
    except Exception as e:
        output_summary = f"Chunking failed: {e}"
        print(output_summary)
        return output_summary, output_files, file_name_base

    chunk_sections, chunk_df, chunks_out = element_chunks_to_document(chunks, chapter_ids)

    file_name_suffix = "_chunk"

    # The new file name does not overwrite the old file name as the 'chunked' elements are only used as an output, and not an input to other functions
    output_summary, output_files, file_name_base_new = export_elements_as_table_to_file(chunks_out, file_name_base, file_name_suffix, chunk_sections)

    return output_summary, output_files, file_name_base
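
# %%
# Illustrative sketch (assumes `elements` from partition_file and a file stem such as "my_doc"):
# summary, files, name_base = chunk_all_elements(elements, "my_doc", chunk_type="Chunk within title")
# print(summary)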

# %%
def element_chunks_to_document(chunks:List[CompositeElement], chapter_ids:dict) -> Tuple[List[Document], PandasDataFrame, List[CompositeElement]]:
    '''
    Take Unstructured.io chunking output (with the original parsed document elements attached) and turn it into the Document format commonly used by vector databases, a Pandas dataframe, and the chunks with updated metadata.
    '''
    chunk_sections = []
    current_title_id = ''
    current_title = ''
    last_page = ''
    chunk_df_list = []

    for chunk in chunks:
        chunk_meta = chunk.metadata.to_dict()
        true_element_ids = []
        element_categories = []
        titles = []
        titles_id = []        

        if "page_number" in chunk_meta:
            last_page = chunk_meta["page_number"]

        chunk_text = chunk.text
        #chunk_page_number = chunk.metadata.to_dict()["page_number"]

        # If the same element text is found, add the element_id to the chunk (not perfect: this will fail if the same text is seen multiple times)
        for element in chunk.metadata.orig_elements:
            
            #element_text = element.text
            element_id = element._element_id
            element_category = element.category
            element_meta = element.metadata.to_dict()

            if "page_number" in element_meta:
                element_page_number = element_meta["page_number"]
                last_page = element_page_number

            true_element_ids.append(element_id)
            element_categories.append(element_category)
            

        # Set new metadata for chunk
        if "page_number" in element_meta:
            chunk_meta["last_page_number"] = last_page
        
        chunk_meta["true_element_ids"] = true_element_ids        

        for loop_id in chunk_meta['true_element_ids']:
            if loop_id in chapter_ids:
                current_title = chapter_ids[loop_id]
                current_title_id = loop_id

                titles.append(current_title)
                titles_id.append(current_title_id)        
                
        chunk_meta['titles'] = titles
        chunk_meta['titles_id'] = titles_id

        # Remove original elements data for documents
        chunk_meta.pop('orig_elements')

        chunk_dict_for_df = chunk_meta.copy()
        chunk_dict_for_df['text'] = chunk.text

        chunk_df_list.append(chunk_dict_for_df)

        
        chunk_doc = [Document(page_content=chunk_text, metadata=chunk_meta)]
        chunk_sections.extend(chunk_doc)

        ## Write metadata back to elements
        chunk.metadata.__dict__ = chunk_meta

    chunk_df = pd.DataFrame(chunk_df_list)

    # print("Doc format: ", chunk_sections)

    return chunk_sections, chunk_df, chunks

# %%
def write_elements_to_documents(elements:List[Element]):
    '''
    Take Unstructured.io parsed elements and write it into a 'Document' format commonly used by vector databases
    '''

    doc_sections = []

    for element in elements:
        meta = element.metadata.to_dict()

        meta["type"] = element.category
        meta["element_id"] = element._element_id

        element_doc = [Document(page_content=element.text, metadata= meta)]
        doc_sections.extend(element_doc)

        #print("Doc format: ", doc_sections)


    return doc_sections
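
# %%
# Illustrative sketch (assumes `elements` from partition_file):
# doc_sections = write_elements_to_documents(elements)
# doc_sections[0].page_content and doc_sections[0].metadata hold the element text and metadata.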

# %%
def clean_elements(elements:List[Element], dropdown_options: List[str] = [''], 
                       output_name:str = "combined_elements",
                       bytes_to_string:bool=False,
                       replace_quotes:bool=True, 
                       clean_non_ascii:bool=False, 
                       clean_ordered_list:bool=True, 
                       group_paragraphs:bool=True,
                       trailing_punctuation:bool=False,
                       all_punctuation:bool=False,
                       clean_text:bool=True, 
                       extra_whitespace:bool=True, 
                       dashes:bool=True, 
                       bullets:bool=True, 
                       lowercase:bool=False) -> List[Element]:
    
    '''
    Apply Unstructured cleaning processes to a list of parsed elements.
    '''

    out_files = []
    output_summary = ""

    # Set variables to True based on dropdown selections
    for option in dropdown_options:
        if option == "Convert bytes to string":
            bytes_to_string = True
        elif option == "Replace quotes":
            replace_quotes = True
        elif option == "Clean non ASCII":
            clean_non_ascii = True
        elif option == "Clean ordered list":
            clean_ordered_list = True
        elif option == "Group paragraphs":
            group_paragraphs = True
        elif option == "Remove trailing punctuation":
            trailing_punctuation = True
        elif option == "Remove all punctuation":
            all_punctuation = True
        elif option == "Clean text":
            clean_text = True
        elif option == "Remove extra whitespace":
            extra_whitespace = True
        elif option == "Remove dashes":
            dashes = True
        elif option == "Remove bullets":
            bullets = True
        elif option == "Make lowercase":
            lowercase = True
           

    cleaned_elements = elements.copy()

    for element in cleaned_elements:

        try:
            if element:  # Check if element is not None or empty
                if bytes_to_string:
                    element.apply(bytes_string_to_string)
                if replace_quotes:
                    element.apply(replace_unicode_quotes)
                if clean_non_ascii:
                    element.apply(clean_non_ascii_chars)
                if clean_ordered_list:
                    element.apply(clean_ordered_bullets)
                if group_paragraphs:
                    element.apply(group_broken_paragraphs)
                if trailing_punctuation:
                    element.apply(clean_trailing_punctuation)
                if all_punctuation:
                    element.apply(remove_punctuation)
                if clean_text:
                    element.apply(lambda x: clean(x, extra_whitespace=extra_whitespace, dashes=dashes, bullets=bullets, lowercase=lowercase))
        except Exception as e:
            print(f"Cleaning failed for element: {e}")

    alt_out_message, out_files, output_file_base = export_elements_as_table_to_file(cleaned_elements, output_name, file_name_suffix="_clean")

    output_summary = "Text elements successfully cleaned."
    print(output_summary)

    return cleaned_elements, output_summary, out_files, output_file_base

# %%
def export_elements_as_table_to_file(elements:List[Element], file_name_base:str, file_name_suffix:str="", chunk_documents:List[Document]=[]):
    '''
    Export elements as a table (CSV) and as a gzipped pickle of Document objects.
    '''
    output_summary = ""
    out_files = []

    # Convert to dataframe format
    out_table = convert_to_dataframe(elements)

    # If the file suffix already exists in the output file name, don't add it again.
    if file_name_suffix not in file_name_base:
        out_file_name_base = file_name_base + file_name_suffix

    else:
        out_file_name_base = file_name_base
        
    out_file_name = "output/" + out_file_name_base + ".csv"

    out_table.to_csv(out_file_name)
    out_files.append(out_file_name)

    # Convert to document format
    if chunk_documents:
        out_documents = chunk_documents
    else:
        out_documents = write_elements_to_documents(elements)

    

    out_file_name_docs = "output/" + out_file_name_base + "_docs.pkl.gz"
    with gzip.open(out_file_name_docs, 'wb') as file:
        pickle.dump(out_documents, file)

    out_files.append(out_file_name_docs)

    output_summary = "File successfully exported."

    return output_summary, out_files, out_file_name_base
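
# %%
# Illustrative sketch (assumes `elements` from partition_file and an existing "output/" folder):
# summary, files, name_base = export_elements_as_table_to_file(elements, "my_doc", file_name_suffix="_elements")
# This writes output/my_doc_elements.csv and output/my_doc_elements_docs.pkl.gz.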

# # Partition PDF

def get_file_type(filename):
    pattern = r"\.(\w+)$"  # Match a dot followed by one or more word characters at the end of the string

    match = re.search(pattern, filename)
    if match:
        file_type = match.group(1)  # Extract the captured file type (without the dot), e.g. "pdf"
        print(file_type)
    else:
        print("No file type found.")
        file_type = None

    return file_type
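
# %%
# Illustrative sketch:
# get_file_type("report.pdf")    # -> "pdf"
# get_file_type("no_extension")  # -> None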

# %%
def partition_file(filenames:List[str], pdf_partition_strat:str = pdf_partition_strat, progress = gr.Progress()):
    '''
    Partition document files into text elements using the Unstructured package. Currently supports PDF, docx, pptx, html, several image file types, plain text document types, email messages, and code files.
    '''

    out_message = ""
    combined_elements = []
    out_files = []

    for file in progress.tqdm(filenames, desc="Partitioning files", unit="files"):

        try:

            tic = time.perf_counter()
            print(file)

            file_name = get_file_path_end_with_ext(file)
            file_name_base = get_file_path_end(file)
            file_type = get_file_type(file_name)

            image_file_type_list = ["jpg", "jpeg", "png", "heic"]

            if file_type in image_file_type_list:
                print("File is an image. Using OCR method to partition.")
                file_elements = partition(file, strategy="ocr_only")
            else:
                file_elements = partition(file, strategy=pdf_partition_strat)

            toc = time.perf_counter()


            new_out_message = f"Successfully partitioned file: {file_name} in {toc - tic:0.1f} seconds\n"
            print(new_out_message)

            out_message = out_message + new_out_message
            combined_elements.extend(file_elements)

        except Exception as e:
            new_out_message = f"Failed to partition file:  {file_name} due to {e}. Partitioning halted."
            print(new_out_message)
            out_message = out_message + new_out_message
            break

    out_table = convert_to_dataframe(combined_elements)

    # If multiple files, overwrite default file name for outputs
    if len(filenames) > 1:
        file_name_base = "combined_files"

    alt_out_message, out_files, output_file_base = export_elements_as_table_to_file(combined_elements, file_name_base, file_name_suffix="_elements")

    return out_message, combined_elements, out_files, output_file_base, out_table
        
# %%
def modify_metadata_elements(elements_out_cleaned:List[Element], meta_keys_to_filter:List[str]=meta_keys_to_filter, element_types_to_filter:List[str]=element_types_to_filter) -> List[Element]:

    '''
    Take an element object, add parent title names to metadata. Remove specified metadata keys or element types from element list.
    '''

    chapter_ids, chapter_to_id = create_title_id_dict(elements_out_cleaned.copy())
    elements_out_meta_mod = add_parent_title_to_meta(elements_out_cleaned.copy(), chapter_ids)
    elements_out_meta_mod_meta_filt = remove_keys_from_meta(elements_out_meta_mod.copy(), meta_keys_to_filter)
    elements_out_filtered_meta_mod = filter_elements(elements_out_meta_mod_meta_filt, element_types_to_filter)

    return elements_out_filtered_meta_mod
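
# %%
# Illustrative sketch (assumes `cleaned_elements` is the element list returned by clean_elements above):
# elements_with_titles = modify_metadata_elements(cleaned_elements, meta_keys_to_filter, element_types_to_filter)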
# %%
# file_stub = "C:/Users/SPedrickCase/OneDrive - Lambeth Council/Apps/doc_rag_prep/examples/"
# filenames = []
# pdf_filename = [file_stub + "Lambeth_2030-Our_Future_Our_Lambeth_foreword.pdf"]
# filenames.extend(pdf_filename)

# html_filename = [file_stub + "transport-strategy.html"]
# filenames.extend(html_filename)

# docx_filename = [file_stub + "FINAL Policy and Procedure for Writing Housing Policies.docx"]
# filenames.extend(docx_filename)

# out_message, elements_parse, out_files, output_file_base, out_table = partition_file(filenames=filenames, pdf_partition_strat="ocr_only")

# for element in elements_parse[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")
#     elements_out = elements_parse.copy()

# %% [markdown]
# ### Process with document layout detection - fast strategy
#
# The "fast" strategy will extract the text using pdfminer and process the raw text with partition_text. If the PDF text is not extractable, partition_pdf will fall back to "ocr_only". We recommend using the "fast" strategy in most cases where the PDF has extractable text.
#
# elements_out_parse = partition_pdf(filename=filename, strategy="fast")
# for element in elements_out_parse[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")
# elements_out = elements_out_parse.copy()
#
# ### OCR only
#
# The "ocr_only" strategy runs the document through Tesseract for OCR and then runs the raw text through partition_text. Currently, "hi_res" has difficulty ordering elements for documents with multiple columns. If you have a document with multiple columns that does not have extractable text, we recommend using the "ocr_only" strategy. "ocr_only" falls back to "fast" if Tesseract is not available and the document has extractable text.
#
# elements_out_parse = partition_pdf(filename=filename, strategy="ocr_only")
# for element in elements_out_parse[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")
# elements_out = elements_out_parse.copy()
#
# ### Hi-res partitioning
#
# The "hi_res" strategy will identify the layout of the document using detectron2. The advantage of "hi_res" is that it uses the document layout to gain additional information about document elements. We recommend using this strategy if your use case is highly sensitive to correct classifications for document elements. If detectron2 is not available, the "hi_res" strategy will fall back to the "ocr_only" strategy.
#
# elements_out = partition_pdf(filename=filename, strategy="hi_res")
# for element in elements_out[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")

# %% [markdown]
# ## Clean data

# %%
# elements_out_cleaned = clean_elements(elements_out.copy(), bytes_to_string=False,
# replace_quotes=True ,
# clean_non_ascii=False, 
# clean_ordered_list=True ,
# group_paragraphs=True,
# trailing_punctuation=False,
# all_punctuation=False,
# clean_text=True ,
# extra_whitespace=True, 
# dashes=True ,
# bullets=True ,
# lowercase=False)

# %% [markdown]
# ## Add/remove elements to/from metadata



# %% [markdown]
# ### Write to table, dictionary, document format

# %%
### Dataframe format

# elements_out_filtered_df = convert_to_dataframe(elements_out_filtered_meta_mod)

# elements_out_filtered_df.to_csv("table.csv")
# elements_out_filtered_df.head(6)

# # %%
# ### Dictionary format

# elements_out_filtered_dict = convert_to_dict(elements_out_filtered_meta_mod)
# elements_out_filtered_dict[20]

# # %% [markdown]
# # ### Document format for embeddings

# # %%
# doc_sections = write_elements_to_documents(elements_out_filtered_meta_mod, element_types_to_filter)

# doc_sections[0:10]

# # %% [markdown]
# # ### Break text down into chunks

# # %%
# chunks_by_title = chunk_by_title(
#     elements_out_filtered_meta_mod,
#     include_orig_elements=True,
#     combine_text_under_n_chars=minimum_chunk_length,
#     new_after_n_chars=start_new_chunk_after_end_of_this_element_length,
#     max_characters=hard_max_character_length_chunks,
#     multipage_sections=True,
#     overlap_all=True
# )

# chunk_sections, chunk_df = element_chunks_to_document(chunks_by_title, chapter_ids)
# chunk_df.to_csv("chunked_df.csv")
# print(chunk_sections[2])

# # %%
# chunks_basic = chunk_elements(
#     elements_out_filtered_meta_mod,
#     include_orig_elements=True,
#     new_after_n_chars=start_new_chunk_after_end_of_this_element_length,
#     max_characters=hard_max_character_length_chunks,
#     overlap_all=True
# )

# chunk_basic_sections, chunk_basic_df = element_chunks_to_document(chunks_basic, chapter_ids)
# chunk_basic_df.to_csv("chunked_basic_df.csv")

# %% [markdown]
# # Partition Word document
# 
# You cannot get location metadata for bounding boxes from word documents

# %%
# word_filename = "../examples/FINAL Policy and Procedure for Writing Housing Policies.docx"

# # %%
# docx_elements = partition(filename=word_filename)
# for element in docx_elements:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")

# # %%
# docx_elements[5].text

# # %%
# docx_elements[5].category

# # %%
# docx_elements[5].metadata.to_dict()

# # %% [markdown]
# # ## Find elements associated with chapters

# # %%
# chapter_ids, chapter_to_id = create_title_id_dict(docx_elements)

# chapter_ids

# # %%
# doc_sections = write_elements_to_documents(docx_elements.copy(), chapter_ids)

# # %%
# doc_sections

# # %% [markdown]
# # ### Chunk documents

# # %%
# chunks = chunk_by_title(
#     docx_elements,
#     include_orig_elements=False,
#     combine_text_under_n_chars=0,
#     new_after_n_chars=500,
#     max_characters=1000,
#     multipage_sections=True,
#     overlap_all=True
# )

# # %%
# print(chunks)

# # %%
# chunk_sections = element_chunks_to_document(chunks.copy(), docx_elements.copy(), chapter_ids)

# # %%
# chunk_sections[5].page_content

# # %%
# chunk_sections[5].metadata["true_element_ids"]

# # %%
# for element in docx_elements:
#     if element._element_id in chunk_sections[5].metadata["true_element_ids"]:
#         print(element.text)

# # %% [markdown]
# # # Partition PPTX document

# # %%
# pptx_filename = "../examples/LOTI presentation Jan 2024.pptx"

# # %%
# pptx_elements = partition(filename=pptx_filename)
# for element in pptx_elements[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")

# # %%
# chapter_ids, chapter_to_id = create_title_id_dict(pptx_elements)
# chapter_ids

# # %%
# pptx_sections = write_elements_to_documents(pptx_elements.copy(), chapter_ids)

# # %%
# pptx_sections

# # %%
# pptx_chunks = chunk_by_title(
#     pptx_elements,
#     include_orig_elements=False,
#     combine_text_under_n_chars=0,
#     new_after_n_chars=500,
#     max_characters=1000,
#     multipage_sections=True,
#     overlap_all=True
# )

# # %%
# pptx_chunk_sections = element_chunks_to_document(pptx_chunks.copy(), pptx_elements.copy(), chapter_ids)

# # %% [markdown]
# # ### Load documents into a vectorDB (Not necessary)

# # %%
# import chromadb

# # %%
# client = chromadb.PersistentClient(path="chroma_tmp", settings=chromadb.Settings(allow_reset=True))
# client.reset()

# # %%
# collection = client.create_collection(
#     name="policy_statements",
#     metadata={"hnsw:space": "cosine"}
# )

# # %%
# chapter_ids

# # %%
# for element in docx_elements:
#     parent_id = element.metadata.parent_id
#     #print(element.text)
#     #print(parent_id)
#     #print(element.metadata.to_dict())
#     if parent_id:
#         try:
#             print(parent_id)
#             chapter = chapter_ids[parent_id]
#             print(chapter)
#         except KeyError:
#             chapter = "None"
#     else:
#         chapter = "None"
#     collection.add(
#         documents=[element.text],
#         ids=[element._element_id],
#         metadatas=[{"chapter": chapter}]
#     )

# # %% [markdown]
# # #### See the elements in the VectorDB and perform hybrid search

# # %%
# results = collection.peek()
# print(results["documents"])

# # %%
# print(collection.metadata)

# # %%
# import json

# result = collection.query(
#     query_texts=["What should policies do?"],
#     n_results=2,
#     where={"chapter": '3.0  Policy Statements'},
# )
# print(json.dumps(result, indent=2))

# # %%
# collection = client.create_collection(
#     name="policy_statements_chunk",
#     metadata={"hnsw:space": "cosine"}
# )

# # %%
# for element in chunks:
#     parent_id = element.metadata.parent_id
#     #print(element.text)
#     #print(parent_id)
#     #print(element.metadata.to_dict())
#     if parent_id:
#         try:
#             print(parent_id)
#             chapter = chapter_ids[parent_id]
#             print(chapter)
#         except KeyError:
#             chapter = "None"
#     else:
#         chapter = "None"

#     print(element._element_id)
#     collection.add(
#         documents=[element.text],
#         ids=[element.orig_elements],
#         metadatas=[{"chapter": chapter}]
#     )

# # %% [markdown]
# # # Partition HTML

# # %%
# html_filename = "../examples/transport-strategy.html"

# # %%
# html_elements = partition(filename=html_filename)
# for element in html_elements[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")

# # %% [markdown]
# # # Partition image

# # %%
# img_filename = "../examples/example_complaint_letter.jpg"

# # %%
# img_elements = partition(filename=img_filename)
# for element in img_elements[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")

# # %% [markdown]
# # # Partition XLSX

# # %%
# xlsx_filename = "../examples/fuel-poverty-sub-regional-tables-2020-2018-data.xlsx"

# # %%
# xlsx_elements = partition(filename=xlsx_filename)
# for element in xlsx_elements[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")

# # %% [markdown]
# # # Partition .py

# # %%
# py_filename = "../examples/app.py"

# # %%
# py_elements = partition(filename=py_filename)
# for element in py_elements[:10]:
#     print(f"{element.category.upper()}: {element.text} - Metadata: {element.metadata.to_dict()}")