desc: string, lengths 3 to 26.7k
decl: string, lengths 11 to 7.89k
bodies: string, lengths 8 to 553k
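Each row below pairs a docstring (desc), a declaration (decl), and a flattened function body (bodies). A minimal sketch, assuming the body text has already been re-indented one statement per line, of how such a row could be stitched back into source code; the helper name and sample row are illustrative only:

def assemble_record(desc, decl, body, indent='    '):
    # Rebuild a function definition from one (desc, decl, bodies) row.
    lines = [decl, indent + repr(desc)]                 # declaration, then docstring
    lines += [indent + line for line in body.splitlines()]
    return '\n'.join(lines)

print(assemble_record(
    'Returns: The raw HTML that was loaded.',
    'def get_raw_data(self):',
    'return self.HTML',
))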
'Function to scrape contents of an unordered list'
def get_ul_contents(self, ulist):
    if ulist is None:
        return ''
    text = ''
    for li in ulist:
        cur = ''
        for e in li:
            try:
                if e.name == 'a':
                    code = e.find('code')
                    if code is not None:
                        cur += '<code>' + code.text + '</code>'
                elif e.name == 'div' and 'highlight-default' in e['class']:
                    cur += '<pre><code>' + e.text + '</code></pre>'
                elif e.name == 'code':
                    cur += '<code>' + e.text + '</code>'
                elif e.name == 'strong':
                    cur += str(e)
                elif e.name == 'p':
                    cur += self.get_paragraph_contents(e)
            except:
                # Plain NavigableStrings have no tag attributes; keep their text.
                if str(type(e)) == "<class 'bs4.element.NavigableString'>":
                    cur += e.string
        if cur != '':
            text += '<li>' + cur + '</li>'
    return '<ul>' + text + '</ul>'
'Function to scrape the contents of a table'
def get_table_contents(self, table):
    if table is None:
        return ''
    tbody = table.find('tbody')
    text = ''
    if tbody is None:
        return ''
    trs = tbody.find_all('tr')
    for tr in trs:
        td = tr.find('td')
        th = tr.find('th')
        text += '\\n\\n<span class="prog__sub">' + th.text + '</span>'
        strong = td.find('strong', recursive=False)
        if strong is not None:
            text += '<p><strong>' + strong.text + '</strong></p>'
        p = td.find_all('p')
        if p != []:
            for e in p:
                text += '<p>' + e.text + '</p>'
        ul = td.find('ul')
        text += self.get_ul_contents(ul)
    return text
'Function to scrape contents of a \'p\' tag'
def get_paragraph_contents(self, paragraph):
    p = paragraph
    if p is None:
        return ''
    final_p = '<p>{}</p>'
    text = ''
    for e in p:
        if e is not None:
            try:
                if e.name == 'code':
                    text += '<code>{}</code>'.format(e.string)
                elif e.name == 'a':
                    code = e.find('code')
                    text += '<code>{}</code>'.format(e.string)
                else:
                    text += e.text
            except:
                text += e.string
    return final_p.format(self.replace_unicodes(text))
'Function to parse the source code of the internals page and write the required functions onto output.txt'
def generate_pages(self, page_name):
    elements = self.elements
    repeated = self.repeated
    description = self.description
    title_chck = self.titles
    if page_name == 'internals':
        soup = self.internals_page
    elif page_name == 'events':
        soup = self.events_page
    else:
        soup = self.exceptions_page
    dls = soup.find_all('dl')
    for k in dls:
        curr_dd = k.find('dd')
        section = '<section class="prog__container">{}</section>'
        code = '<pre><code>{}</code></pre>'
        desc = '{}'
        header = '<pre><code>{}</code></pre>'
        code_done = 0
        txt = ''
        p = ''
        first_p = ''
        is_first_p = 1
        for elem in curr_dd:
            if elem is None or str(type(elem)) == "<class 'bs4.element.NavigableString'>":
                continue
            if elem.name == 'dl':
                break
            elif elem.name == 'div' and ('highlight-default' in elem['class']
                                         or 'event-signatures' in elem['class']):
                strg = ''
                if 'event-signatures' in elem['class']:
                    ptag = elem.find('p')
                    p += self.replace_unicodes(self.get_paragraph_contents(ptag))
                    divtag = elem.find('div', {'class': 'highlight-default'})
                    strg = divtag.text
                else:
                    strg = elem.text
                p = self.replace_unicodes(p)
                strg = self.replace_unicodes(strg)
                txt += desc.format(p) + code.format(strg)
                p = ''
                is_first_p = 0
            elif elem.name == 'div' and ('admonition' in elem['class']
                                         or 'versionadded' in elem['class']
                                         or 'versionchanged' in elem['class']):
                continue
            elif elem.name == 'div' and 'seealso' in elem['class']:
                break
            elif elem.name == 'table':
                p += self.get_table_contents(elem)
                p = self.replace_unicodes(p)
            elif elem.name == 'ul':
                p += self.replace_unicodes(self.get_ul_contents(elem))
            elif elem.name == 'p':
                p += self.replace_unicodes(self.get_paragraph_contents(elem))
                if is_first_p:
                    first_p += elem.text
                    is_first_p = 0
            elif elem is not None:
                try:
                    p += elem.text
                except:
                    p += elem
        cur_dt = k.find('dt')
        head = ''
        for d in cur_dt:
            if d is None or str(type(d)) == "<class 'bs4.element.NavigableString'>":
                head += d.string
                continue
            if d.name == 'a' and d['class'][0] == 'headerlink':
                break
            head += d.string
        head = head.replace(u'\u2192', '&#8594;')
        head = head.replace('\n', '')
        header = header.format(head)
        p = self.replace_unicodes(p)
        txt += desc.format(p)
        section = section.format(header + txt)
        cur_code = cur_dt.find('code', {'class': 'descname'})
        title = ''
        write = 0
        class_list = cur_dt['id'].split('.')
        for s in class_list:
            if s != 'sqlalchemy' and s != 'orm':
                title += s + '.'
        title = title[0:len(title) - 1]
        if cur_code.text in title_chck:
            to_write = title + ' DCTB '
            if cur_code.text in repeated:
                repeated[cur_code.text].append(title)
            else:
                repeated[cur_code.text] = [title]
            write = 1
        else:
            to_write = cur_code.text + ' DCTB '
            title_chck[cur_code.text] = title
        to_write += 'A DCTB '
        to_write += ' DCTB ' * 9
        to_write += section + ' DCTB '
        to_write += 'http://docs.sqlalchemy.org/en/latest/orm/' + page_name + '.html#' + cur_dt['id']
        to_write += '\n'
        if write:
            elements[title] = to_write
            description[title] = self.replace_unicodes(first_p)
            description[title] = self.remove_newline(description[title])
        else:
            elements[cur_code.text] = to_write
            description[cur_code.text] = self.replace_unicodes(first_p)
            description[cur_code.text] = self.remove_newline(description[cur_code.text])
'Initialize PythonData object. Load data from HTML.'
def __init__(self, file):
    self.HTML = ''
    self.FILE = file
    self.load_data()
'Open the HTML file and load it into the object.'
def load_data(self):
with open(self.FILE, 'r') as data_file: self.HTML = data_file.read()
'Returns: The raw HTML that was loaded.'
def get_raw_data(self):
return self.HTML
'Returns: The file path of the file being used.'
def get_file(self):
return self.FILE
'Given raw data, get the relevant sections Args: raw_data: HTML data'
def __init__(self, data_object):
    self.parsed_data = []
    self.soup = BeautifulSoup(data_object.get_raw_data(), 'html.parser')
'Main loop for parsing main subjects and their sub sections'
def parse_for_data(self):
    body_section = self.soup.find('div', {'class': 'section'})
    title = self.parse_title(body_section)
    first_paragraph = self.parse_for_first_paragraph(body_section)
    example = self.parse_example(body_section)
    content = self.parse_content(body_section)
    href = self.soup.find('link', {'rel': 'canonical'}).get('href')
    section = {'title': title,
               'content': content,
               'first_paragraph': first_paragraph,
               'example': '<br><pre><code>{}</code></pre>'.format(example),
               'anchor': href}
    self.parsed_data.append(section)
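A minimal driver sketch for the loader/parser pair above; PythonData is named in the docstrings, but the parser class name and the file path are assumptions:

data = PythonData('download/index.html')      # illustrative path
parser = PythonDataParser(data)               # assumed class name
parser.parse_for_data()
for entry in parser.get_data():
    print(entry['title'], entry['anchor'])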
'Returns code example for block First searches if there are highlighted divs, then seeks out the correct div for code example. Args: body_soup: BeautifulSoup object containing section body Returns: Code block'
def parse_example(self, body_soup):
    highlight = body_soup.find('div', {'class': 'highlight-python'})
    if highlight:
        for p in body_soup.find_all('p'):
            content = p.get_text()
            if 'source code' in content:
                text = p.findNext('div', {'class': 'highlight'}).get_text()
                return text.strip('\n')
    return ''
'Return content title Args: body_soup: BeautifulSoup object containing section body Returns: Page title'
def parse_title(self, body_soup):
return str(body_soup.find('h1').get_text()).replace('\xc2\xb6', '')
'Returns all paragraphs as text, joined by two new lines Args: body_soup: BeautifulSoup object containing section body Returns: All paragraphs as text'
def parse_content(self, body_soup):
    paragraphs = body_soup.find_all('p')
    content = []
    for paragraph in paragraphs:
        content.append(paragraph.get_text())
    return '\n\n'.join(content)
'Returns the first paragraph of text for a given function Fixes up double spacing and newlines. Args: section: A section of parsed HTML that represents a function definition Returns: First paragraph found with text'
def parse_for_first_paragraph(self, section):
    paragraphs = section.find_all('p')
    for paragraph in paragraphs:
        if paragraph.text:
            return paragraph.text.replace('  ', ' ').replace('\n', ' ').replace('\\n', '\\\\n')
    return ''
'Get the parsed data. Returns: self.parsed_data: Dict containing necessary data elements'
def get_data(self):
return self.parsed_data
'Iterate through the data and create the needed output.txt file, appending to file as necessary.'
def create_file(self):
    for data_element in self.data:
        title = data_element.get('title')
        abstract = self.truncate(data_element.get('first_paragraph'), 400)
        example = data_element.get('example')
        if example:
            abstract = '{}<br>{}'.format(abstract, example)
        abstract = abstract.replace('\n', '\\n')
        anchor = data_element.get('anchor')
        list_of_data = [title, 'A', '', '', '', '', '', '', URL_ROOT, '', '', abstract, anchor]
        self.output_file.write('{}\n'.format(' DCTB '.join(list_of_data)))
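For orientation, the 13 tab-separated fields this writer joins per entry; ' DCTB ' throughout the scraped code appears to stand in for a literal tab, and the values here are invented for illustration:

fields = [
    'Some Title',              # 1: article title
    'A',                       # 2: entry type ('A' = article, 'R' = redirect)
    '', '', '', '', '', '',    # 3-8: left empty by this writer
    'https://example.org/',    # 9: URL_ROOT is written here
    '', '',                    # 10-11: left empty
    '<p>First paragraph</p><br><pre><code>example()</code></pre>',  # 12: abstract
    'https://example.org/page.html#anchor',                          # 13: source URL
]
print('\t'.join(fields))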
'Initialize DjangoData object. Load data from HTML.'
def __init__(self, page_name):
    self.DJANGO_HTML = ''
    self.load_data(page_name)
'Open the HTML file and load it into the object.'
def load_data(self, page_name):
with open(('download/' + page_name), 'r') as data_file: self.DJANGO_HTML = data_file.read()
'Returns: The raw HTML that was loaded.'
def get_raw_data(self):
return self.DJANGO_HTML
'Given raw data, get the relevant sections Args: raw_data: HTML data'
def __init__(self, raw_data, section_name, page_url):
    self.parsed_data = None
    self.url = page_url
    soup_data = BeautifulSoup(raw_data, 'html.parser')
    doc_content = soup_data.find('div', {'id': 'docs-content'})
    tags = doc_content.find('div', {'class': 'section', 'id': section_name})
    self.tag_sections = tags.find_all('div', {'class': 'section'})
'Find the name and anchor for a given section Args: section: A section of parsed HTML that represents a section Returns: name: Name of the Element anchor: Anchor tag to use when linking back to docs (ie #autoescape)'
def parse_name_and_anchor_from_data(self, section, heading_style):
    name = ''
    anchor = ''
    heading = section.find(heading_style)
    if heading:
        code = heading.find('code', {'class': 'docutils'})
        if code:
            name = code.find('span', {'class': 'pre'}).string
        a_tag = heading.find('a', {'class': 'headerlink'})
        if a_tag:
            anchor = a_tag['href']
    return (name, anchor)
'Get the first paragraph for display Args: section: A section of parsed HTML that represents a Element Returns: First paragraph in the HTML'
def parse_first_paragraph_from_data(self, section, pstyle):
    if pstyle == 'p':
        try:
            return section.find('p').text.replace('\n', ' ')
        except:
            return ''
    elif pstyle == 'dt':
        try:
            dtname = section.find('dt').text.replace('\n', ' ')
            if 'source' in dtname:
                dtname = dtname[:-9]
            elif '\xc2\xb6' in dtname:
                dtname = dtname[:-1]
            return dtname
        except:
            return ''
'Get the second paragraph for display Args: section: A section of parsed HTML that represents a Element Returns: second paragraph in the HTML'
def parse_second_paragraph_from_data(self, section, pstyle):
    if pstyle == 'p':
        try:
            para = section.find_all('p')[1]
            para = para.text.partition('. ')
            return ''.join(para[:2]).replace('\n', ' ')
        except:
            return ''
    elif pstyle == 'dt':
        para = section.find_all('p')[0]
        if para:
            para = para.text.partition('. ')
            return ''.join(para[:2]).replace('\n', ' ')
        else:
            return ''
'Look for an example code block to output Args: section: A section of parsed HTML that represents a section Returns: Formatted code string'
def parse_code_from_data(self, section):
    code = section.find('div', {'class': 'highlight'})
    if code:
        return '<pre><code>{}</code></pre>'.format(code.text.replace('\n', '\\n'))
    return ''
'Main gateway into parsing the data. Will retrieve all necessary data elements.'
def parse_for_data(self, code_or_second_para, hstyle, pstyle):
    parsing_data = []
    for section in self.tag_sections:
        (name, anchor) = self.parse_name_and_anchor_from_data(section, hstyle)
        first_paragraph = self.parse_first_paragraph_from_data(section, pstyle)
        if code_or_second_para == 'code':
            code = self.parse_code_from_data(section)
            second_paragraph = ''
        elif code_or_second_para == 'para':
            second_paragraph = self.parse_second_paragraph_from_data(section, pstyle)
            code = ''
        data_elements = {'name': name,
                         'anchor': anchor,
                         'first_paragraph': first_paragraph,
                         'second_paragraph': second_paragraph,
                         'code': code,
                         'url': self.url}
        parsing_data.append(data_elements)
    self.parsed_data = parsing_data
'Get the parsed data. Returns: self.parsed_data: Dict containing necessary data elements'
def get_data(self):
return self.parsed_data
'Iterate through the data and create the needed output1.txt file.'
def create_file(self):
    with open('output1.txt', 'a+') as output_file:
        for data_element in self.data:
            if data_element.get('name'):
                name = data_element.get('name')
                if '()' in name:
                    name = name[:-2]
                code = data_element.get('code')
                first_paragraph = '<p>' + data_element.get('first_paragraph') + '</p>'
                second_paragraph = '<p>' + data_element.get('second_paragraph') + '</p>'
                abstract = '{}{}{}'.format(first_paragraph + second_paragraph, '', code)
                abstract = '<section class="prog__container">' + abstract + '</section>'
                url = '{}{}'.format(data_element.get('url'), data_element.get('anchor'))
                list_of_data = [name, 'A', '', '', '', '', '', '', DJANGO_HOME, '', '', abstract, url]
                output_file.write('{}\n'.format(' DCTB '.join(list_of_data)))
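A hedged end-to-end sketch for the Django scraper above; DjangoData is named in the docstrings, while the parser class name, section id, and URL are assumptions:

page = DjangoData('builtins.html')
parser = DjangoDataParser(page.get_raw_data(),
                          'built-in-tag-reference',   # assumed section id
                          'https://docs.djangoproject.com/en/stable/ref/templates/builtins/')
parser.parse_for_data('code', 'h3', 'p')
for entry in parser.get_data():
    print(entry['name'], entry['anchor'])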
'Get all the Spark Scala API class files that need to be parsed'
def __init__(self):
self.files_to_parse = glob.glob('download/*.html')
'Parse each Spark Scala API class file and make a Description object for each'
def parse_comment(self):
    with open('output.txt', 'wb') as output:
        for file in self.files_to_parse:
            soup = BeautifulSoup(open(file), 'html.parser')
            name = file.split('/')[1].replace('.html', '')
            source_url = '{}{}.html'.format(spark_scala_base_url, name.replace('.', '/'))
            comment_element = soup.find('div', id='comment').find(class_='comment cmt')
            if not comment_element:
                continue
            description_with_annotation = u' '.join([p.text for p in comment_element.find_all('p')])
            description_with_annotation_list = description_with_annotation.split(u'::')
            annotation = None
            assert (len(description_with_annotation_list) == 3
                    or len(description_with_annotation_list) == 1), name
            if len(description_with_annotation_list) == 3:
                annotation = unicode.strip(description_with_annotation_list[1])
                description = unicode.strip(description_with_annotation_list[2])
            elif len(description_with_annotation_list) == 1:
                description = unicode.strip(description_with_annotation_list[0])
            description = u' '.join(unicode.split(description))
            description_object = Description(name, annotation, description, source_url)
            output.write((description_object.get_description() + '\n').encode('utf-8'))
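The split(u'::') logic above expects Scaladoc summaries that either start with an annotation wrapped in double colons or contain no annotation at all; a small illustration of the two accepted shapes (the annotation text is only an example):

with_annotation = ':: DeveloperApi :: Low-level status reporting APIs.'
parts = with_annotation.split('::')     # ['', ' DeveloperApi ', ' Low-level ...']
assert len(parts) == 3
annotation, description = parts[1].strip(), parts[2].strip()

plain = 'Main entry point for Spark functionality.'
parts = plain.split('::')               # single-element list
assert len(parts) == 1
description = parts[0].strip()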
'Initialize PythonData object. Load data from HTML.'
def __init__(self, file):
    self.HTML = ''
    self.FILE = file
    self.load_data()
'Open the HTML file and load it into the object.'
def load_data(self):
    with codecs.open(self.FILE, 'r', encoding='utf-8', errors='ignore') as data_file:
        document, errors = tidy_document(data_file.read())
        self.HTML = document
'Returns: The raw HTML that was loaded.'
def get_raw_data(self):
return self.HTML
'Returns: The file path of the file being used.'
def get_file(self):
return self.FILE
'Given raw data, get the relevant sections Args: raw_data: HTML data'
def __init__(self, data_object):
    self.file_being_used = data_object.get_file()
    self.file_name = self.file_being_used.split('/').pop()
    self.main_sections = []
    self.sub_sections = []
    self.soup = BeautifulSoup(data_object.get_raw_data(), 'html.parser')
'Main loop for parsing main subjects and their sub sections'
def parse_for_data(self):
    for header in self.soup.find_all('h2'):
        section = self.parse_section(header)
        self.main_sections.append(section)
    for header in self.soup.find_all('h3'):
        section = self.parse_section(header)
        section['example'] = self.clean_unicode_numerals(self.get_example(header))
        section['abstract'] = ('<section class="prog__container"><p>{}</p>'
                               '<pre><code>{}</code></pre></section>').format(
                                   section['paragraph'], section['example'])
        self.sub_sections.append(section)
    self.parsed_data = self.main_sections + self.sub_sections
'Get the parsed data. Returns: self.parsed_data: Dict containing necessary data elements'
def get_data(self):
return self.parsed_data
'Gets the next code example after soup tag object Args: Soup tag object Returns: Text inside <pre> tag'
def get_example(self, soup):
    text = soup.findNext('pre').get_text()
    text = text.strip('\n')
    return text.replace('\n', '\\n')
'Parses url to the content Args: Soup tag object Returns: Url with anchor'
def get_url(self, soup):
    anchor = soup.get('id')
    if anchor:
        parsed_url = '{}#{}'.format(os.path.join(URL_ROOT, self.file_name), anchor)
    else:
        parsed_url = os.path.join(URL_ROOT, self.file_name)
    return parsed_url
'Parses main and sub sections in soup object. Args: soup: Soup tag object Returns: Section data, containing title, first paragraph and url to content.'
def parse_section(self, soup):
    first_paragraph = self.clean_formatting(soup.findNext('p').get_text())
    title = self.clean_formatting(soup.get_text())
    anchor = self.get_url(soup)
    url = os.path.join(URL_ROOT, self.file_name)
    return {'title': title, 'paragraph': first_paragraph, 'anchor': anchor, 'url': url}
'Fixes up some weird double spacing and newlines. Args: text: Piece of text to be fixed. Returns: Given text without double spacing and new lines.'
def clean_formatting(self, text):
    text = text.replace('  ', ' ').replace('\n', ' ').replace('\\n', '\\\\n')
    text = text.replace('Continuing from the previous example:', '')
    return self.clean_unicode_numerals(text.strip())
'Fixes circled unicode numbers used in text. Args: text: Text to be fixed. Returns: Text without ① .. ⑲ characters.'
def clean_unicode_numerals(self, text):
    # Circled digits ① (U+2460) through ⑲ (U+2472).
    for codepoint in range(0x2460, 0x2473):
        text = text.replace(chr(codepoint), '')
    return text
'Iterate through the data and create the needed output.txt file, appending to file as necessary.'
def create_file(self):
    with open('output.txt', 'w') as output_file:
        for data_element in self.data:
            title = data_element.get('title')
            first_paragraph = data_element.get('first_paragraph')
            abstract = data_element.get('abstract') or first_paragraph
            if abstract is None:
                continue
            anchor = data_element.get('anchor')
            url = data_element.get('url')
            list_of_data = []
            entry = self.get_data('A', title=title, url=url, abstract=abstract, anchor=anchor)
            list_of_data.append(entry)
            replace_dict = {'Coercing': 'Coerce', 'Creating': 'Create', 'Slicing': 'Slice',
                            'Searching': 'Search', 'Removing': 'Remove', 'Assigning': 'Assign',
                            'Modifying': 'Modify'}
            for (key, value) in replace_dict.items():
                if key in title:
                    alternate_title = title.replace(key, value)
                    entry = self.get_data('R', alternate_title, redirect_data=title)
                    list_of_data.append(entry)
            entries = []
            for data in list_of_data:
                if ' A ' in data[0] and data[1] == 'A':
                    alternate_title = data[0].replace(' A ', ' ')
                    entry = self.get_data('R', alternate_title, redirect_data=data[0])
                    entries.append(entry)
                elif ' A ' in data[2] and data[1] == 'R':
                    alternate_title = data[0].replace(' A ', ' ')
                    entry = self.get_data('R', alternate_title, redirect_data=data[2])
                    entries.append(entry)
            list_of_data += entries
            for data in list_of_data:
                tsv = '{}\n'.format(' DCTB '.join(data))
                output_file.write(tsv)
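As a sketch of what the redirect logic above derives from one article, assuming get_data() lays the fields out with the title first, the type second, and the redirect target third; the title is invented for illustration:

rows = [
    ('Creating A Hash', 'A'),   # the original article entry
    ('Create A Hash',   'R'),   # replace_dict: 'Creating' -> 'Create'
    ('Creating Hash',   'R'),   # ' A ' stripped from the article title
    ('Create Hash',     'R'),   # ' A ' stripped from the derived redirect
]
for title, row_type in rows:
    print(title, row_type)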
'Parses downloaded PyMongo API documentation files :param baseurl: BaseURL of api docs :param downloaddir: Directory of downloaded API docs'
def __init__(self, baseurl, downloaddir):
    self.baseurl = baseurl
    self.downloaddir = downloaddir
    self.api_files = []
    self.get_api_doc_files()
    self.output = ''
    self.print_output()
'Reads index.html of downloaded API docs and parses for links to api docs files'
def get_api_doc_files(self):
    self.api_files = []
    entry_html = open(self.downloaddir + 'index.html')
    soupy = BeautifulSoup(entry_html, 'html.parser')
    for element in soupy.find_all('a', {'class': 'reference internal',
                                        'href': re.compile('^.*\\.html$')}):
        self.api_files.append(self.downloaddir + element['href'])
    del soupy
'Parses found files, prints formatted output for DDG Fathead to stdout.'
def print_output(self):
    for filename in self.api_files:
        html_data = open(filename).read()
        soupy = BeautifulSoup(html_data, 'html.parser')
        for element in soupy.find_all('dl', {'class': 'class'}):
            anchor_link = ''
            if element.find('a', {'class': 'headerlink'}):
                anchor_link = element.find('a', {'class': 'headerlink'})['href']
            code = ''
            description = ''
            tags_to_replace = ['method', 'classmethod', 'function', 'attribute', 'exception']
            for tag_to_replace in tags_to_replace:
                for tag in element.find_all('dl', {'class': tag_to_replace}):
                    tag_code = ''
                    tag_description = ''
                    for item in tag.dt.contents:
                        tag_code += str(item)
                    tag_code = self._clean_html_tags(tag_code)
                    for item in tag.dd.contents:
                        tag_description += str(item)
                    tag_description = self._clean_html_tags(tag_description)
                    abstract = ('<section class="prog_container">' + '<pre><code>' + tag_code
                                + '</code></pre><p>' + tag_description + '</p></section>')
                    tag_title = tag.dt['id']
                    with open('redirects.txt', 'a') as f:
                        f.write('{}, {}\n'.format(tag_title.replace('.', ' '), tag_title))
                    tag_anchor_link = ''
                    if tag.find('a', {'class': 'headerlink'}):
                        tag_anchor_link = tag.find('a', {'class': 'headerlink'})['href']
                    filename_removed_dir = filename.replace('download/', '')
                    headerlink = self.baseurl + filename_removed_dir + tag_anchor_link
                    output_line = ''
                    output_line += tag_title + ' DCTB '
                    output_line += 'A' + ' DCTB '
                    output_line += ' DCTB ' * 9          # nine empty fields
                    output_line += abstract + ' DCTB '
                    output_line += headerlink
                    if output_line:
                        print(output_line)
                    tag.decompose()
            output_line = ''
            for item in element.dt.contents:
                code += str(item)
            code = self._clean_html_tags(code)
            code = self._clean_code_tags(code)
            for item in element.dd.contents:
                description += str(item)
            description = self._clean_html_tags(description)
            abstract = ('<section class="prog_container">' + '<pre><code>' + code
                        + '</code></pre><p>' + description + '</p></section>')
            title = element.dt['id']
            with open('redirects.txt', 'a') as f:
                f.write('{}, {}\n'.format(title.replace('.', ' '), title))
            filename_removed_dir = filename.replace('download/', '')
            headerlink = self.baseurl + filename_removed_dir + anchor_link
            output_line += title + ' DCTB '
            output_line += 'A' + ' DCTB '
            output_line += ' DCTB ' * 9                  # nine empty fields
            output_line += abstract + ' DCTB '
            output_line += headerlink
            if output_line:
                print(output_line)
            element.decompose()
        tags_to_replace = ['method', 'classmethod', 'function', 'attribute', 'exception',
                           'describe', 'data']
        for tag_to_replace in tags_to_replace:
            for tag in soupy.find_all('dl', {'class': tag_to_replace}):
                anchor_link = ''
                if tag.find('a', {'class': 'headerlink'}):
                    anchor_link = tag.find('a', {'class': 'headerlink'})['href']
                tag_code = ''
                tag_description = ''
                for item in tag.dt.contents:
                    tag_code += str(item)
                tag_code = self._clean_html_tags(tag_code)
                tag_code = self._clean_code_tags(tag_code)
                for item in tag.dd.contents:
                    tag_description += str(item)
                tag_description = self._clean_html_tags(tag_description)
                output_line = ''
                abstract = ('<section class="prog_container">' + '<pre><code>' + tag_code
                            + '</code></pre><p>' + tag_description + '</p></section>')
                title = tag.dt['id']
                with open('redirects.txt', 'a') as f:
                    f.write('{}, {}\n'.format(title.replace('.', ' '), title))
                tag.decompose()
                filename_removed_dir = filename.replace('download/', '')
                headerlink = self.baseurl + filename_removed_dir + anchor_link
                output_line += title + ' DCTB '
                output_line += 'A' + ' DCTB '
                output_line += ' DCTB ' * 9              # nine empty fields
                output_line += abstract + ' DCTB '
                output_line += headerlink
                if output_line:
                    print(output_line)
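A hypothetical entry point for the PyMongo scraper above; only the __init__(baseurl, downloaddir) signature appears in this file, so the class name and URL are assumptions. Since __init__ already calls get_api_doc_files() and print_output(), constructing the object is enough to emit the rows to stdout:

PymongoDocParser('http://api.mongodb.com/python/current/api/',  # illustrative base URL
                 'download/')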
'Cleans <code> tags from html. Should only be used on the \'code\' section to prevent <code> tags from doubling up within the <pre><code> section :param code_html_to_clean: String of html to remove code tags from :return: html with code tags removed'
def _clean_code_tags(self, code_html_to_clean):
    html_soup_cleaner = BeautifulSoup(code_html_to_clean, 'html.parser')
    for tag in html_soup_cleaner.find_all('code'):
        if tag.attrs:
            del tag.attrs
            del tag.name
    for tag in html_soup_cleaner.find_all('table'):
        if tag.attrs:
            del tag.name
            del tag.attrs
        if tag.ul:
            tag.contents = tag.ul.contents
            tmp_tag = html_soup_cleaner.new_tag('span')
            tmp_tag.attrs = {'class': 'prog__sub'}
            tmp_tag.string = 'Parameters'
            tag.insert(0, tmp_tag)
    cleaned_html = str(html_soup_cleaner)
    cleaned_html = cleaned_html.replace('<None>', '')
    cleaned_html = cleaned_html.replace('</None>', '')
    cleaned_html = cleaned_html.replace('<none>', '')
    cleaned_html = cleaned_html.replace('</none>', '')
    while '<p></p>' in cleaned_html:
        cleaned_html = cleaned_html.replace('<p></p>', '')
    cleaned_html = cleaned_html.strip()
    return cleaned_html
'Cleans unwanted tags from html for DDG Fathead output :param html_to_clean: string of html to clean :return: string of cleaned html'
def _clean_html_tags(self, html_to_clean):
    html_soup_cleaner = BeautifulSoup(html_to_clean, 'html.parser')
    for tag in html_soup_cleaner.find_all('code'):
        if tag.attrs:
            del tag.attrs
    for tag in html_soup_cleaner.find_all('div', {'class': 'highlight-default'}):
        tag.name = 'pre'
        del tag.attrs
        tag.next_element.name = 'code'
        del tag.next_element.attrs
        tag.next_element.next_element.name = 'span'
    tags_to_decompose = ['versionmodified', 'versionadded', 'versionchanged']
    for tag_to_decompose in tags_to_decompose:
        for tag in html_soup_cleaner.find_all('span', {'class': re.compile(tag_to_decompose)}):
            tag.decompose()
        for tag in html_soup_cleaner.find_all('div', {'class': re.compile(tag_to_decompose)}):
            tag.decompose()
    tags_to_replace = ['div', 'blockquote']
    for tag_to_replace in tags_to_replace:
        regex_pattern = '^{}*'.format(tag_to_replace)
        for tag in html_soup_cleaner.find_all(re.compile(regex_pattern)):
            del tag.name
            if tag.attrs:
                del tag.attrs
    regex_pattern = '^{}*'.format('span')
    for tag in html_soup_cleaner.find_all(re.compile(regex_pattern)):
        if tag.attrs:
            if tag.attrs == {'class': ['prog__sub']}:
                pass
            else:
                del tag.attrs
                del tag.name
        else:
            del tag.name
    for tag in html_soup_cleaner.find_all('a'):
        if tag.attrs:
            del tag.name
            del tag.attrs
    for tag in html_soup_cleaner.find_all('p'):
        if tag.attrs:
            tag.decompose()
    for tag in html_soup_cleaner.find_all('table'):
        if tag.attrs:
            del tag.name
            del tag.attrs
        if tag.ul:
            tag.contents = tag.ul.contents
            tmp_tag = html_soup_cleaner.new_tag('span')
            tmp_tag.attrs = {'class': 'prog__sub'}
            tmp_tag.string = 'Parameters'
            tag.insert(0, tmp_tag)
    for tag in html_soup_cleaner.find_all('em', {'class': 'property'}):
        if tag.attrs:
            del tag.attrs
    for tag in html_soup_cleaner.find_all('cite'):
        tag.name = 'code'
    cleaned_html = str(html_soup_cleaner)
    cleaned_html = cleaned_html.replace('\xc2\xb6', '')
    cleaned_html = cleaned_html.replace('<None>', '')
    cleaned_html = cleaned_html.replace('</None>', '')
    cleaned_html = cleaned_html.replace('<none>', '')
    cleaned_html = cleaned_html.replace('</none>', '')
    cleaned_html = cleaned_html.strip()
    cleaned_html = cleaned_html.replace('\n', '\\n')
    while '\\n\\n' in cleaned_html:
        cleaned_html = cleaned_html.replace('\\n\\n', '\\n')
    while '<p></p>' in cleaned_html:
        cleaned_html = cleaned_html.replace('<p></p>', '')
    return cleaned_html
'Initialize Data object. Load data from HTML.'
def __init__(self, file):
    self.HTML = ''
    self.FILE = file
    self.load_data()
'Open the HTML file and load it into the object.'
def load_data(self):
with open(self.FILE, 'r') as data_file: self.HTML = data_file.read()
'Returns: The raw HTML that was loaded.'
def get_raw_data(self):
return self.HTML
'Returns: The file path of the file being used.'
def get_file(self):
return self.FILE
'Given raw data, get the relevant sections Args: raw_data: HTML data'
def __init__(self, data_object, titles):
    self.titles = titles
    self.parsed_data = None
    self.topic_sections = []
    self.file_being_used = data_object.get_file()
    self.soup_data = BeautifulSoup(data_object.get_raw_data(), 'html.parser')
    table_of_contents = self.soup_data.find(class_='maruku_toc')
    sections = table_of_contents.find_all('li')
    for section in sections:
        section_id = section.find('a')
        section_id = section_id['href']
        heading = self.soup_data.find(id=section_id[1:])
        self.topic_sections.append(heading)
'Returns the section name Args: section: A section of parsed HTML that represents a topic Returns: Name of topic'
def parse_for_name(self, section):
    name = section.text
    if name in self.titles.keys():
        info = self.titles[name]
        if info[0].strip() != 'None':
            return info[0].strip()
        else:
            return name
    else:
        return None
'Returns any redirects for article Args: section: A section of parsed HTML that represents a topic Returns: list of redirects'
def parse_for_redirects(self, section):
    name = section.text
    if name in self.titles.keys():
        info = self.titles[name]
        if info[1].strip() != 'None':
            return info[1].strip().split(',')
        else:
            return []
    else:
        return []
'Returns the section id for topic Args: section: A section of parsed HTML that represents a topic Returns: id of section'
def parse_for_id(self, section):
return ('#' + section.get('id'))
'Returns the topic description Fixes up some weird double spacing and newlines. Args: section: A section of parsed HTML that represents a topic Returns: topic description'
def parse_for_description(self, section):
    next_para = section.find_next('p')
    description = '<p>' + str(next_para.text.encode('utf-8')) + '</p>'
    next_tag = next_para.find_next_sibling()
    if next_tag.name == 'pre' or next_tag.name == 'code':
        text = str(next_tag.encode('utf-8'))
        text = '\\n'.join(text.split('\n'))
        description = description + text
    return description
'Helper method to create URL back to document Args: anchor: #anchor Returns: Full URL to function on the sass doc'
def create_url(self, id):
return (SASS_DOC_BASE_URL + id)
'Main gateway into parsing the data. Will retrieve all necessary data elements.'
def parse_for_data(self):
    data = []
    names = []
    for topic_section in self.topic_sections:
        name = self.parse_for_name(topic_section)
        if name:
            description = self.parse_for_description(topic_section)
            id = self.parse_for_id(topic_section)
            url = self.create_url(id)
            redirect = self.parse_for_redirects(topic_section)
            if name in names:
                index = names.index(name)
                data_elements = data[index]
                data_elements['description'] += description
                data_elements['redirects'].extend(redirect)
            else:
                names.append(name)
                data_elements = {'name': name, 'description': description,
                                 'url': url, 'redirects': redirect}
                data.append(data_elements)
    self.parsed_data = data
'Get the parsed data. Returns: self.parsed_data: Dict containing necessary data elements'
def get_data(self):
return self.parsed_data
'Iterate through the data and create the needed output.txt file, appending to file as necessary.'
def create_file(self):
    with open('output.txt', 'a') as output_file:
        for data_element in self.data:
            if data_element.get('name'):
                description = ('<section class="prog__container">'
                               + data_element.get('description') + '</section>')
                url = data_element.get('url').encode('utf-8')
                name = data_element.get('name').encode('utf-8')
                redirect = data_element.get('redirects')
                list_of_data = [name, 'A', '', '', '', '', '', '', '', '', '', description, url]
                line = ' DCTB '.join(list_of_data)
                output_file.write(line + '\n')
'Iterate through the data and create the needed output.txt file, appending to file as necessary.'
def create_redirects(self):
    with open('output.txt', 'a') as output_file:
        for data_element in self.data:
            if data_element.get('name'):
                name = data_element.get('name').encode('utf-8')
                redirects = data_element.get('redirects')
                for redirect in redirects:
                    list_of_data = [redirect.strip(), 'R', name, '', '', '', '', '', '', '', '', '', '']
                    line = ' DCTB '.join(list_of_data)
                    output_file.write(line + '\n')
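A hedged driver for the Sass topic scraper above; the Data/Parser/Output class names, file path, and the shape of the titles mapping are assumptions based on the __init__ signatures in this file:

titles = {
    'Interpolation: #{}': ('Interpolation', 'interpolate,hash syntax'),  # illustrative entry
}
data = SassData('download/SASS_REFERENCE.html')   # assumed class name and path
parser = SassTopicParser(data, titles)            # assumed class name
parser.parse_for_data()
output = SassTopicOutput(parser.get_data())       # assumed class name
output.create_file()
output.create_redirects()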
'Initialize Data object. Load data from HTML.'
def __init__(self, file):
    self.HTML = ''
    self.FILE = file
    self.load_data()
'Open the HTML file and load it into the object.'
def load_data(self):
with open(self.FILE, 'r') as data_file: self.HTML = data_file.read()
'Returns: The raw HTML that was loaded.'
def get_raw_data(self):
return self.HTML
'Returns: The file path of the file being used.'
def get_file(self):
return self.FILE
'Given raw data, get the relevant sections Args: raw_data: HTML data'
def __init__(self, data_object):
    self.parsed_data = None
    self.function_sections = []
    self.file_being_used = data_object.get_file()
    self.soup_data = BeautifulSoup(data_object.get_raw_data(), 'html.parser')
    sections = self.soup_data.find_all('dl', {'class': None})
    for section in sections:
        function_names = section.find_all('dt')
        function_descriptions = section.find_all('dd')
        for i in range(len(function_names)):
            self.function_sections.append([function_names[i], function_descriptions[i]])
'Returns the function name Args: section: A section of parsed HTML that represents a function definition Returns: Name of function'
def parse_for_function_name(self, section):
    function_name = section.find('a')
    if function_name:
        return function_name.text.split('(')[0]
    return ''
'Returns the function description Fixes up some weird double spacing and newlines. Args: section: A section of parsed HTML that represents a function description Returns: Function description'
def parse_for_description(self, section):
    return section.text.replace('  ', ' ').replace('\n', ' ').replace('\\n', '\\\\n')
'Returns the anchor link to specific function doc Args: section: A section of parsed HTML that represents a function definition Returns: The href value of the link to doc'
def parse_for_anchor(self, section):
    a_tag = section.find('a')
    if a_tag:
        return a_tag['href']
    return ''
'Returns the method signature Args: section: A section of parsed HTML that represents a function definition Returns: The method signature'
def parse_for_method_signature(self, section):
    method_sig = section.find('a')
    if method_sig:
        return '<pre><code>' + method_sig.text + '</code></pre>'
    return ''
'Return a div containing more information about the section function Args: section: A section of parsed HTML that represents a function definition Returns: A div element'
def parse_for_section_div(self, section):
    anchor = self.parse_for_anchor(section)
    heading = self.soup_data.find(id=anchor[1:])
    return heading.parent
'Return example code for section function Args: section: A section of parsed HTML that represents a function definition Returns: A string or None if there are no examples'
def parse_for_example(self, section):
    info = self.parse_for_section_div(section)
    example = info.find('div', {'class': 'examples'})
    if example:
        code = example.find('pre')
        text = '<span class="prog__sub">Example</span>' + str(code)
        text = '\\n'.join(text.split('\n'))
        return text
    return None
'Return parameter information for section function Args: section: A section of parsed HTML that represents a function definition Returns: A string or None if there are no parameters'
def parse_for_parameters(self, section):
    info = self.parse_for_section_div(section)
    parameters = info.find('ul', {'class': 'param'})
    if parameters:
        code = self.fix_parameter_links(parameters)
        text = '<span class="prog__sub">Parameters</span><ul>'
        code = code.find_all('li')
        for parameter in code:
            text = text + '<li>'
            name = parameter.find('span', {'class': 'name'})
            if name:
                text = text + name.text
            inline = parameter.find('div', {'class': 'inline'})
            if inline:
                inline = parameter.find('p')
                inline = str(inline)
                inline = inline.replace('\xe2\x80\x99', '&#39;')
                inline = inline.strip('<p>')
                inline = inline.strip('</p>')
                text = text + ' - ' + inline
            text = text + '</li>'
        text = text + '</ul>'
        return text
    return None
'Return corrected links for parameter information Args: parameters - HTML code representing parameters Return: HTML code representing parameters with corrected links'
def fix_parameter_links(self, parameters):
    for a in parameters.findAll('a'):
        path = a['href']
        a['href'] = a['href'].replace(a['href'],
                                      'http://sass-lang.com/documentation/Sass/Script/' + path)
    return parameters
'Helper method to create URL back to document Args: anchor: #anchor Returns: Full URL to function on the sass doc'
def create_url(self, anchor):
return (SASS_DOC_BASE_URL + anchor)
'Main gateway into parsing the data. Will retrieve all necessary data elements.'
def parse_for_data(self):
    data = []
    functions = []
    for function_section in self.function_sections:
        function = self.parse_for_function_name(function_section[0])
        if function:
            method_signature = self.parse_for_method_signature(function_section[0])
            description = self.parse_for_description(function_section[1])
            anchor = self.parse_for_anchor(function_section[0])
            example = self.parse_for_example(function_section[0])
            parameter = self.parse_for_parameters(function_section[0])
            abstract = '<p>' + description + '</p>' + method_signature
            if example:
                abstract = abstract + example
            if parameter:
                abstract = '%s%s' % (abstract, parameter)
            abstract = '<section class="prog__container">' + abstract + '</section>'
            url = self.create_url(anchor)
            if function in functions:
                index = functions.index(function)
                data_elements = data[index]
                data_elements['abstract'] += abstract
            else:
                functions.append(function)
                data_elements = {'function': function, 'abstract': abstract, 'url': url}
                data.append(data_elements)
    self.parsed_data = data
'Get the parsed data. Returns: self.parsed_data: Dict containing necessary data elements'
def get_data(self):
return self.parsed_data
'Figure out the name of the function. Will contain the module name if one exists. Args: data_element: Incoming data dict Returns: Name, with whitespace stripped out'
def create_names_from_data(self, data_element):
    function = data_element.get('function')
    dotted_name = '{}{}{}'.format(function, '.' if function else '', function)
    spaced_name = '{} {}'.format(function, function)
    return (dotted_name.strip(), spaced_name.strip())
'Iterate through the data and create the needed output.txt file, appending to file as necessary.'
def create_file(self):
    with open('output.txt', 'a') as output_file:
        for data_element in self.data:
            if data_element.get('function'):
                name = data_element.get('function').encode('utf-8')
                abstract = data_element.get('abstract').encode('utf-8')
                url = data_element.get('url').encode('utf-8')
                list_of_data = [name, 'A', '', '', 'sass functions', '', '', '', '', '', '', abstract, url]
                line = ' DCTB '.join(list_of_data)
                output_file.write(line + '\n')
'Iterate through the data and add redirects to output.txt file, appending to file as necessary.'
def create_redirect(self):
    with open('output.txt', 'a') as output_file:
        for data_element in self.data:
            if data_element.get('function'):
                name = data_element.get('function').encode('utf-8')
                abstract = data_element.get('abstract').encode('utf-8')
                url = data_element.get('url').encode('utf-8')
                list_of_data = [name + ' function', 'R', name, '', '', '', '', '', '', '', '', '', '']
                line = ' DCTB '.join(list_of_data)
                output_file.write(line + '\n')
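A similar hedged driver for the Sass function scraper; SASS_DOC_BASE_URL is referenced by create_url(), while the class names and file path are again assumptions:

data = SassData('download/Sass/Script/Functions.html')   # assumed path
parser = SassFunctionParser(data)                         # assumed class name
parser.parse_for_data()
output = SassFunctionOutput(parser.get_data())            # assumed class name
output.create_file()        # writes the 'A' article rows
output.create_redirect()    # writes '<name> function' -> '<name>' 'R' rows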
'Output the python six information in the proper format required for DuckDuckGo Fatheads'
def __str__(self):
    code = ('<pre><code>{}</code></pre>'.format(self.usage.replace('\n', '\\n').replace(' DCTB ', ' '))
            if self.usage else '')
    if code:
        abstract = '{}{}'.format(self.description, code)
    else:
        abstract = self.description
    abstract = '<section class="prog__container">{}</section>'.format(abstract)
    return ' DCTB '.join([self.name, 'A', '', '', '', '', '', '', '', '', '', abstract,
                          '{}/{}'.format(python_six_base_url, self.filename)])
'Get all files that need to be parsed'
def __init__(self):
self.files_to_parse = glob.glob('download/*.html')
'Parse module and make a SixModule object for each'
def parse_module(self):
    self.six_module = []
    for file in self.files_to_parse:
        soup = BeautifulSoup(open(file), 'html.parser')
        div = soup.find('div', {'class': 'section'})
        if not div:
            continue
        for tag in div.find_all('dl'):
            # Get the module name
            if tag.dt.select('code')[1].get_text(strip=True) != 'six.':
                module_name = tag.dt.select('code')[1].get_text(strip=True)
            else:
                module_name = tag.dt.select('code')[2].get_text(strip=True)
            # Get the module description
            description = tag.dd.p.getText()
            # Get code for the module if present
            code = tag.dd.div.getText() if tag.dd.div else None
            module = SixModule(module_name, description, file.replace('download/', ''))
            module.usage = code
            self.six_module.append(module)
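A hedged run of the six scraper above; SixModule is named in the code, while the parser class name and output path are assumptions:

parser = SixParser()            # assumed class name; __init__ globs download/*.html
parser.parse_module()
with open('output.txt', 'w') as out:
    for module in parser.six_module:
        out.write('{}\n'.format(module))   # SixModule.__str__ renders the Fathead row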
'Flask documentation contains \'¶\' symbols to represent hyperlinks, which need to be removed. Double spaces need to be converted to single spaces. The HTML symbol xbb causes problems in postgres and needs to be escaped.'
def clean_formatting(self, text):
    text = text.replace('  ', ' ').replace('\n', '\\n').replace('\xc3\x82\xc2\xb6', '').replace('\\xbb', '\\\\xbb').strip()
    return text
'CoverageClass - Class object scraped from Coverage API documentation of a class :param raw_data: HTML of class scraped from documentation :param base_url: URL of API Documentation'
def __init__(self, raw_data, base_url, base_title):
    self.raw_data = raw_data
    self.base_url = base_url
    self.base_title = base_title
    self.method_list = []
    self.class_name = ''
    self.parse_raw_class_data_into_methods()
'Used to get list of methods parsed from class :return: List of methods parsed from class'
def get_method_list(self):
return self.method_list
'Used to get number of methods :return: number of methods'
def get_method_count(self):
return len(self.method_list)
'Scrapes methods from html of class object'
def parse_raw_class_data_into_methods(self):
    soupy = BeautifulSoup(self.raw_data, 'html.parser')
    self.class_name = soupy.dt['id']
    methods = soupy.find_all('dl', {'class': 'method'})
    for method in set(methods):
        self.method_list.append(method)
'Returns method from method list at method_index :param method_index: Index of method to return raw data :return: raw html data of method'
def get_raw_method(self, method_index):
return self.method_list[method_index]
'Cleans unwanted tags from method html for DDG Fathead output :param html_to_clean: string of html to clean :return: string of cleaned html'
def _clean_html_tags(self, html_to_clean):
    html_soup_cleaner = BeautifulSoup(html_to_clean, 'html.parser')
    for tag in html_soup_cleaner.find_all('div', {'class': 'highlight-default'}):
        tag.name = 'pre'
        del tag.attrs
        tag.next_element.name = 'code'
        del tag.next_element.attrs
        tag.next_element.next_element.name = 'span'
    tags_to_replace = ['code', 'span', 'div', 'blockquote']
    for tag_to_replace in tags_to_replace:
        regex_pattern = '^{}*'.format(tag_to_replace)
        for tag in html_soup_cleaner.find_all(re.compile(regex_pattern)):
            del tag.name
            if tag.attrs:
                del tag.attrs
    for tag in html_soup_cleaner.find_all('a'):
        if tag.attrs:
            del tag.name
            del tag.attrs
    for tag in html_soup_cleaner.find_all('ul'):
        if tag.attrs:
            del tag.attrs
    for tag in html_soup_cleaner.find_all('cite'):
        tag.name = 'code'
    cleaned_html = str(html_soup_cleaner)
    cleaned_html = cleaned_html.replace('<None>', '')
    cleaned_html = cleaned_html.replace('</None>', '')
    cleaned_html = cleaned_html.replace('\n', '\\n').strip('\\n ')
    return cleaned_html
'Parses method html into json object :param method: raw html data of method :return: json object of parsed values'
def get_parsed_method_from_data(self, method):
    attributes = {}
    html_soup = BeautifulSoup(str(method), 'html.parser')
    attributes['base_url'] = self.base_url
    attributes['base_title'] = self.base_title
    attributes['id'] = html_soup.dt['id']
    attributes['title'] = attributes['id'].split('.')[1] + ' ' + attributes['id'].split('.')[2]
    attributes['headerlink'] = '{}{}'.format(self.base_url, html_soup.a['href'])
    attributes['code'] = ''
    code_line = re.search('^<code.*\n', method, re.MULTILINE).group(0)
    code_line = code_line[0:code_line.rindex('</span>') + len('</span>')]
    attributes['code'] = self._clean_html_tags(code_line)
    temp_desc = ''
    for (index, value) in enumerate(html_soup.dd):
        temp_desc += str(value)
    attributes['description'] = self._clean_html_tags(temp_desc)
    attributes['abstract'] = ('<section class="prog_container">' + '<pre><code>'
                              + attributes['code'] + '</code></pre><p>'
                              + attributes['description'] + '</p></section>')
    return attributes
'Takes attributes from method, outputs as expected for DDG fathead\'s output.txt :param method_index: int index of method to parse for output :return: single line string of parsed output'
def get_parsed_method_suitable_for_output(self, method_index):
    output_line = ''
    attributes = self.get_parsed_method_from_method_index(method_index)
    output_line += attributes['title'] + ' DCTB '
    output_line += 'A' + ' DCTB '
    output_line += ' DCTB ' * 9            # nine empty fields
    output_line += attributes['abstract'] + ' DCTB '
    output_line += attributes['headerlink'] + '\n'
    return output_line
'Return parsed method using method index :param method_index: index in method list of method to parse :return: json object of parsed method'
def get_parsed_method_from_method_index(self, method_index):
    raw_method = str(self.get_raw_method(method_index))
    return self.get_parsed_method_from_data(raw_method)
'Given a URL of the Coverage API, will find and create CoverageClass objects :param URL: URL of entry to CoverageAPI Documentation'
def __init__(self, URL='https://coverage.readthedocs.io/en/latest/api.html'):
    self.URL = URL
    self.classes = []
    self.api_urls = []
'Gets output for all methods in class :return: string parsed to output.txt format expected by DDG'
def get_output(self):
    output = ''
    for class_instance in self.classes:
        method_count = class_instance.get_method_count()
        for x in range(method_count):
            output += class_instance.get_parsed_method_suitable_for_output(x)
    return output
'Parses URL for HTML :param url: URL to be parsed :return: json object of URL and html data'
def get_html_data_from_url(self, url):
    html_data = requests.get(url).text
    return_data = {'URL': url, 'html_data': html_data}
    return return_data
'Parses URL for HTML :param url: URL where file retrieved :param filename: filename of html data to parse :return: json object of URL and html data'
def get_html_data_from_file(self, url, filename):
    html_data = open(filename).read()
    url = url[:url.rindex('/')]
    if filename.find('/'):
        filename = filename[filename.rindex('/') + 1:]
    url = '{}/{}'.format(url, filename)
    return_data = {'URL': url, 'html_data': html_data}
    return return_data
'Sets/returns API URLs found in class HTML :return: API URLs found in parsing class'
def get_api_urls(self):
    if self.api_urls == []:
        self.find_api_doc_urls(self.URL)
        return self.api_urls
    else:
        return self.api_urls
'Finds URLs from primary entry URL of documentation links :param url: URL to be parsed to retrieve documentation links :return: list of URLs which appear to be links to CoverageClass documentation'
def find_api_doc_urls(self, url):
    html_data = requests.get(url).text
    html_soup = BeautifulSoup(html_data, 'html.parser')
    url_base = url[0:url.rindex('/') + 1]
    api_url_list = []
    api_urls = html_soup.find_all('a',
                                  {'class': 'reference internal', 'href': re.compile('^api.*\\.html')},
                                  text=re.compile('.*class(es)?'))
    for url in set(api_urls):
        api_url_list.append('{}{}'.format(url_base, url['href']))
    self.api_urls = api_url_list
'Parses raw HTML data from URL of API for Coverage data classes, stores in object :param json_url_object: json object of URL and html_data to be parsed'
def parse_data_for_classes(self, json_url_object):
    html_soup = BeautifulSoup(json_url_object['html_data'], 'html.parser')
    title = html_soup.title.text
    class_data = html_soup.find_all('dl', {'class': 'class'})
    for class_instance in class_data:
        self.classes.append(CoverageClass(str(class_instance), json_url_object['URL'], title))
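A hedged end-to-end run of the Coverage scraper; CoverageClass and the default api.html URL come from this file, while the top-level parser class name is an assumption:

parser = CoverageAPIParser()                        # assumed class name
for api_url in parser.get_api_urls():               # discovers api*.html pages
    page = parser.get_html_data_from_url(api_url)   # {'URL': ..., 'html_data': ...}
    parser.parse_data_for_classes(page)
print(parser.get_output())                          # one row per parsed method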