X-Git-Url: https://git.armaanb.net/?p=phrases.git;a=blobdiff_plain;f=extract.py;h=833594da483748f0608acaa5c143e88249d007e9;hp=65f99143e2fca0bd24dc65ea8927633451540413;hb=58ad575e75427a13fdca64a17ced4ae2f3f986ad;hpb=c9ecbb88bfab85d5149d7b7681169d4855ebac4a

diff --git a/extract.py b/extract.py
index 65f9914..833594d 100755
--- a/extract.py
+++ b/extract.py
@@ -2,22 +2,63 @@
 # Extract Latin famous phrases from wikipedia
 # Armaan Bhojwani 2020
 
-from bs4 import BeautifulSoup
+import argparse
+import sqlite3
 import requests
+from bs4 import BeautifulSoup
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description="Generate SQLite db of Latin famous phrases from Wikipedia.")
+    parser.add_argument("-o", "--output",
+                        default="phrases.db",
+                        help="set custom output file location")
+    return parser.parse_args()
+
+def get_html(url):
+    print("downloading webpage")
+    return BeautifulSoup(requests.get(url).content, "html.parser")
+
+def prep_database(c):
+    print("prepping database")
+    c.execute("DROP TABLE IF EXISTS phrases")
+    c.execute("""CREATE TABLE phrases(
+              id INTEGER,
+              latin TEXT,
+              english TEXT,
+              notes TEXT,
+              length INTEGER)""")
+
+def fill_database(list_table, c, conn):
+    i = 0  # phrase id
+    print("iterating through tables")
+    for table in list_table:
+        for row in table.tbody.find_all("tr", recursive=False):
+            cell = row.find_all("td", recursive=False)
+            if len(cell) > 2:  # rows with fewer cells are section headers
+                print(i, end="\r")  # progress counter, overwritten in place
+
+                latin = (cell[0].get_text(" ", strip=True)).rstrip()
+                english = (cell[1].get_text(" ", strip=True)).rstrip()
+                notes = (cell[2].get_text(" ", strip=True)).rstrip()
+
+                c.execute("""INSERT INTO phrases
+                          (id, latin, english, notes, length)
+                          VALUES(?, ?, ?, ?, ?)""",
+                          (i, latin, english, notes, len(latin)))
+                conn.commit()
+                i += 1
+
+def get_tables():
+    url = ("https://en.wikipedia.org/w/index.php?"
+           "title=List_of_Latin_phrases_(full)&oldid=986793908")
+    return get_html(url).find_all("table", attrs={"class":"wikitable"})
+
+def main(args):
+    conn = sqlite3.connect(args.output)
+    c = conn.cursor()
+    prep_database(c)
+    fill_database(get_tables(), c, conn)
 
-url = 'https://en.wikipedia.org/wiki/List_of_Latin_phrases_(full)'
-response = requests.get(url)
-html = response.content
-
-soup = BeautifulSoup(html, "html.parser")
-list_table = soup.find_all("table", attrs={"class":"wikitable"})
-f = open("phrases", "w")
-
-for table in list_table:
-    for row in table.find_all("tr")[1:]:
-        f.write("%" )
-        cell = row.find_all("td")
-        for content in cell:
-            text = content.get_text()
-            f.write("\n" + text)
-f.close()
+if __name__ == "__main__":
+    main(parse_args())