X-Git-Url: https://git.armaanb.net/?p=phrases.git;a=blobdiff_plain;f=extract.py;h=4a0a6b00cb3b44a01e3b02e54506dcd6197379b7;hp=4e6c1f63544e13e9e434718350714ddd080b9382;hb=HEAD;hpb=f03d1f1de5ed1c36fe3e27505653e669f01a59a3

diff --git a/extract.py b/extract.py
index 4e6c1f6..4a0a6b0 100755
--- a/extract.py
+++ b/extract.py
@@ -1,53 +1,75 @@
 #!/usr/bin/env python3
-# Extract Latin famous phrases from wikipedia
-# Armaan Bhojwani 2020
+# Extract Latin famous phrases from Wikipedia
+# Armaan Bhojwani 2021
 
 import argparse
-import sys
-import csv
+import sqlite3
 import requests
 from bs4 import BeautifulSoup
 
-def main(args=sys.argv[1:]):
-    # Argument parsing
+
+def parse_args():
     parser = argparse.ArgumentParser(
-        description="Generate CSV file of Latin famous phrases from Wikipedia.")
+        description="Generate database of Latin famous phrases from Wikipedia")
     parser.add_argument("-o", "--output",
-                        default="phrases.csv",
-                        help="set custom output file location")
-    args = parser.parse_args()
-
-    url = ('https://en.wikipedia.org/w/index.php?title=List_of_Latin_phrases_('
-           'full)&oldid=986793908')
-    soup = BeautifulSoup(requests.get(url).content, "html.parser")
-    i = 0 # For the phrase id
-
-    with open(args.output, 'w') as f:
-        writer = csv.writer(f, lineterminator="\n")
-
-        # write header
-        headers = ['id', 'Latin', 'English', 'Notes', 'Length']
-        writer.writerow(headers)
-
-        # iterate through the tables in the page
-        list_table = soup.find_all("table", attrs={"class":"wikitable"})
-        for table in list_table:
-            for row in table.tbody.find_all("tr", recursive=False):
-                cell = row.find_all("td", recursive=False)
-                rowc = []
-
-                rowc.append(i) # append phrase id
-
-                # add cell content
-                for content in cell:
-                    text = (content.get_text(" ", strip=True)).rstrip()
-                    rowc.append(text)
-
-                if len(rowc) > 1:
-                    rowc.append(len(rowc[1]))
-                    writer.writerow(rowc)
-                    i = i + 1
-    f.close()
+                        default="phrases.db",
+                        help="set custom output file location")
+    parser.add_argument("-v", "--version",
+                        action="version",
+                        version="phrases-extract 1.0.3")
+    return parser.parse_args()
+
+
+def get_html(url):
+    print("downloading webpage")
+    return BeautifulSoup(requests.get(url).content, "html.parser")
+
+
+def prep_database():
+    print("prepping database")
+    c.execute("DROP TABLE IF EXISTS phrases")
+    c.execute("""CREATE TABLE phrases(
+              id INTEGER,
+              latin TEXT,
+              english TEXT,
+              notes TEXT,
+              length INTEGER)""")
+
+
+def fill_database(list_table):
+    i = 0  # phrase id
+    print("iterating through tables")
+    for table in list_table:
+        for row in table.tbody.find_all("tr", recursive=False):
+            cell = row.find_all("td", recursive=False)
+            if len(cell) > 2:
+                print(i, end="\r")
+
+                latin = (cell[0].get_text(" ", strip=True)).rstrip()
+                english = (cell[1].get_text(" ", strip=True)).rstrip()
+                notes = (cell[2].get_text(" ", strip=True)).rstrip()
+
+                c.execute("""INSERT INTO phrases
+                          (id, latin, english, notes, length)
+                          VALUES(?, ?, ?, ?, ?)""",
+                          (i, latin, english, notes, len(latin)))
+                conn.commit()
+                i = i + 1
+
+
+def get_tables():
+    url = ("https://en.wikipedia.org/w/index.php?title=List_of_Latin_phrases_("
+           "full)&oldid=986793908")
+    return get_html(url).find_all("table", attrs={"class": "wikitable"})
+
+
+def main():
+    prep_database()
+    fill_database(get_tables())
+
 
 if __name__ == "__main__":
+    args = parse_args()
+    conn = sqlite3.connect(args.output)
+    c = conn.cursor()
     main()
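
For reference, a minimal sketch of reading back the database the new extract.py
produces. It assumes the default "phrases.db" output path and the phrases table
created in prep_database() above; the ORDER BY column and LIMIT are purely
illustrative.

    #!/usr/bin/env python3
    # Sketch: query the phrases database generated by extract.py.
    # Assumes the default output file "phrases.db" and the schema
    # phrases(id, latin, english, notes, length) created above.
    import sqlite3

    conn = sqlite3.connect("phrases.db")
    c = conn.cursor()

    # Print the five shortest phrases with their translations.
    for latin, english in c.execute(
            "SELECT latin, english FROM phrases ORDER BY length LIMIT 5"):
        print(f"{latin} -> {english}")

    conn.close()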