]> git.armaanb.net Git - phrases.git/blobdiff - extract.py
functionalized things
[phrases.git] / extract.py
index 4e6c1f63544e13e9e434718350714ddd080b9382..833594da483748f0608acaa5c143e88249d007e9 100755 (executable)
@@ -3,51 +3,62 @@
 # Armaan Bhojwani 2020
 
 import argparse
-import sys
-import csv
+import sqlite3
 import requests
 from bs4 import BeautifulSoup
 
-def main(args=sys.argv[1:]):
-    # Argument parsing
def parse_args():
    """Build the command-line interface and return the parsed arguments."""
    ap = argparse.ArgumentParser(
        description="Generate SQLite db of Latin famous phrases from Wikipedia.")
    ap.add_argument(
        "-o", "--output",
        default="phrases.db",
        help="set custom output file location")
    return ap.parse_args()
 
-    url = ('https://en.wikipedia.org/w/index.php?title=List_of_Latin_phrases_('
-          'full)&oldid=986793908')
-    soup = BeautifulSoup(requests.get(url).content, "html.parser")
-    i = 0 # For the phrase id
def get_html(url):
    """Download *url* and return it parsed as a BeautifulSoup tree.

    Raises requests.exceptions.RequestException on network failure or
    if the server does not respond within the timeout.
    """
    print("downloading webpage")
    # A timeout keeps the script from hanging forever on a stalled connection;
    # the original call had none, which blocks indefinitely on a dead server.
    return BeautifulSoup(requests.get(url, timeout=30).content, "html.parser")
 
-    with open(args.output, 'w') as f:
-        writer = csv.writer(f, lineterminator="\n")
def prep_database(c):
    """(Re)create the ``phrases`` table on cursor *c*, dropping any old copy."""
    print("prepping database")
    # Drop first so re-running the script always starts from a clean table.
    c.execute("DROP TABLE IF EXISTS phrases")
    schema = """CREATE TABLE phrases(
              id INTEGER,
              latin TEXT,
              english TEXT,
              notes TEXT,
              length INTEGER)"""
    c.execute(schema)
 
-        # write header
-        headers = ['id', 'Latin', 'English', 'Notes', 'Length']
-        writer.writerow(headers)
def fill_database(list_table, c, conn):
    """Insert every phrase row found in *list_table* into the phrases table.

    list_table: iterable of BeautifulSoup <table> elements (wikitables).
    c:          sqlite3 cursor used for the INSERTs.
    conn:       sqlite3 connection; committed once after all rows are in.
    """
    i = 0  # phrase id; counts every <tr>, including skipped header/short rows
    print("iterating through tables")
    for table in list_table:
        for row in table.tbody.find_all("tr", recursive=False):
            cell = row.find_all("td", recursive=False)
            # Rows with fewer than three <td> cells (e.g. section headers)
            # are skipped but still consume an id, matching prior numbering.
            if len(cell) > 2:
                print(i, end="\r")

                latin = (cell[0].get_text(" ", strip=True)).rstrip()
                english = (cell[1].get_text(" ", strip=True)).rstrip()
                notes = (cell[2].get_text(" ", strip=True)).rstrip()

                c.execute("""INSERT INTO phrases
                         (id, latin, english, notes, length)
                         VALUES(?, ?, ?, ?, ?)""",
                         (i, latin, english, notes, len(latin)))
            i = i + 1
    # Commit once at the end instead of once per row: a single transaction
    # is far faster, and a failure mid-scrape leaves the table unchanged
    # rather than half-filled.
    conn.commit()
 
-                rowc.append(i) # append phrase id
def get_tables():
    """Return every wikitable element from the pinned Latin-phrases page."""
    # BUG FIX: the previous triple-quoted literal was split across two source
    # lines, embedding a literal newline and leading spaces in the middle of
    # the URL, so the request targeted a mangled address. Adjacent string
    # literals concatenate without inserting any characters.
    url = ("https://en.wikipedia.org/w/index.php?title=List_of_Latin_phrases_("
           "full)&oldid=986793908")
    return get_html(url).find_all("table", attrs={"class":"wikitable"})
 
-                # add cell content
-                for content in cell:
-                    text = (content.get_text(" ", strip=True)).rstrip()
-                    rowc.append(text)
-
-                if len(rowc) > 1:
-                    rowc.append(len(rowc[1]))
-                    writer.writerow(rowc)
-                i = i + 1
-    f.close()
def main(args):
    """Drive the scrape: open the output db, build the schema, and fill it.

    args: parsed argparse namespace; only ``args.output`` (db path) is read.
    """
    conn = sqlite3.connect(args.output)
    try:
        c = conn.cursor()
        prep_database(c)
        fill_database(get_tables(), c, conn)
    finally:
        # Release the database file handle even if the scrape raises midway;
        # the original left the connection open on every exit path.
        conn.close()
 
 if __name__ == "__main__":
-    main()
+    main(parse_args())