#!/usr/bin/env python3
"""Generate a SQLite database of Latin famous phrases scraped from Wikipedia."""
# Armaan Bhojwani 2020

import argparse
import sqlite3

import requests
from bs4 import BeautifulSoup

VERSION = "phrases extract.py 1.0.1"

# Pinned oldid so the scrape is reproducible against one revision of the page.
# NOTE: must be a single unbroken string — a triple-quoted multi-line literal
# here would embed a newline and indentation inside the URL and break the
# request.
WIKI_URL = ("https://en.wikipedia.org/w/index.php"
            "?title=List_of_Latin_phrases_(full)&oldid=986793908")


def parse_args():
    """Parse command-line arguments.

    Returns the parsed namespace with:
      output  -- path of the SQLite database to write (default: phrases.db)
      version -- True when the user asked for the script version
    """
    parser = argparse.ArgumentParser(
        description="Generate SQLite db of Latin famous phrases from Wikipedia.")
    parser.add_argument("-o", "--output",
                        default="phrases.db",
                        help="set custom output file location")
    parser.add_argument("-v", "--version",
                        action="store_true",
                        help="print script version")
    return parser.parse_args()


def get_html(url):
    """Download *url* and return its content parsed as a BeautifulSoup tree."""
    print("downloading webpage")
    return BeautifulSoup(requests.get(url).content, "html.parser")


def get_tables():
    """Return all phrase tables (class ``wikitable``) from the pinned page."""
    return get_html(WIKI_URL).find_all("table", attrs={"class": "wikitable"})


def prep_database(cursor):
    """(Re)create the ``phrases`` table on *cursor*'s connection.

    Drops any existing table first so repeated runs start from a clean slate.
    """
    print("prepping database")
    cursor.execute("DROP TABLE IF EXISTS phrases")
    cursor.execute("""CREATE TABLE phrases(
        id INTEGER,
        latin TEXT,
        english TEXT,
        notes TEXT,
        length INTEGER)""")


def fill_database(cursor, list_table):
    """Insert one row per phrase found in *list_table* using *cursor*.

    Each qualifying table row contributes (id, latin, english, notes,
    length-of-latin). The caller is responsible for committing — committing
    per row here was a per-insert fsync and dominated the runtime.
    """
    i = 0  # sequential phrase id
    print("iterating through tables")
    for table in list_table:
        for row in table.tbody.find_all("tr", recursive=False):
            cell = row.find_all("td", recursive=False)
            # Rows with fewer than 3 cells are headers/spacers — skip them.
            if len(cell) > 2:
                print(i, end="\r")  # lightweight progress indicator
                latin = cell[0].get_text(" ", strip=True).rstrip()
                english = cell[1].get_text(" ", strip=True).rstrip()
                notes = cell[2].get_text(" ", strip=True).rstrip()
                cursor.execute("""INSERT INTO phrases
                    (id, latin, english, notes, length)
                    VALUES(?, ?, ?, ?, ?)""",
                               (i, latin, english, notes, len(latin)))
                i += 1


def main():
    """Entry point: print the version, or scrape and build the database."""
    args = parse_args()
    if args.version:
        print(VERSION)
        return
    # Open the database only when actually scraping (not for --version),
    # and always close it, even if the scrape fails midway.
    conn = sqlite3.connect(args.output)
    try:
        prep_database(conn.cursor())
        fill_database(conn.cursor(), get_tables())
        conn.commit()  # single commit for the whole batch
    finally:
        conn.close()


if __name__ == "__main__":
    main()