]> git.armaanb.net Git - phrases.git/blobdiff - extract.py
initial switch to sqlite
[phrases.git] / extract.py
index 4e6c1f63544e13e9e434718350714ddd080b9382..8f8877a8fd51968751022a80bc2272edb120de49 100755 (executable)
@@ -3,51 +3,59 @@
 # Armaan Bhojwani 2020
 
 import argparse
+import sqlite3
 import sys
-import csv
 import requests
 from bs4 import BeautifulSoup
 
 def main(args=sys.argv[1:]):
     # Argument parsing
     parser = argparse.ArgumentParser(
-        description="Generate CSV file of Latin famous phrases from Wikipedia.")
+        description="Generate SQLite db of Latin famous phrases from Wikipedia.")
     parser.add_argument("-o", "--output",
-                       default="phrases.csv",
+                       default="phrases.db",
                        help="set custom output file location")
     args = parser.parse_args()
 
-    url = ('https://en.wikipedia.org/w/index.php?title=List_of_Latin_phrases_('
-          'full)&oldid=986793908')
+    url = ("https://en.wikipedia.org/w/index.php?title=List_of_Latin_phrases_"
+          "(full)&oldid=986793908")
+    print("downloading webpage")
     soup = BeautifulSoup(requests.get(url).content, "html.parser")
-    i = 0 # For the phrase id
-
-    with open(args.output, 'w') as f:
-        writer = csv.writer(f, lineterminator="\n")
-
-        # write header
-        headers = ['id', 'Latin', 'English', 'Notes', 'Length']
-        writer.writerow(headers)
 
-        # iterate through the tables in the page
-        list_table = soup.find_all("table", attrs={"class":"wikitable"})
-        for table in list_table:
-            for row in table.tbody.find_all("tr", recursive=False):
-                cell = row.find_all("td", recursive=False)
-                rowc = []
+    print("prepping database")
+    conn = sqlite3.connect(args.output)
+    c = conn.cursor()
+    c.execute("DROP TABLE IF EXISTS phrases")
+    c.execute("""CREATE TABLE phrases(
+              id INTEGER,
+              latin TEXT,
+              english TEXT,
+              notes TEXT,
+              length INTEGER)""")
 
-                rowc.append(i) # append phrase id
-
-                # add cell content
-                for content in cell:
-                    text = (content.get_text(" ", strip=True)).rstrip()
-                    rowc.append(text)
+    i = 0 # For the phrase id
 
-                if len(rowc) > 1:
-                    rowc.append(len(rowc[1]))
-                    writer.writerow(rowc)
-                i = i + 1
-    f.close()
+    # iterate through the tables in the page
+    list_table = soup.find_all("table", attrs={"class":"wikitable"})
+    print("iterating through tables")
+    for table in list_table:
+        for row in table.tbody.find_all("tr", recursive=False):
+            cell = row.find_all("td", recursive=False)
+            if len(cell) > 2:
+                print(i, end="\r")
+                latin = (cell[0].get_text(" ", strip=True)).rstrip()
+                english = (cell[1].get_text(" ", strip=True)).rstrip()
+                notes = (cell[2].get_text(" ", strip=True)).rstrip()
+
+                c.execute("""INSERT INTO phrases (id, latin, english, notes, length)
+                          VALUES(?, ?, ?, ?, ?)""", (i, latin, english, notes, len(latin)))
+                conn.commit()
+
+            i = i + 1
+
+    print("closing database")
+    c.close()
+    conn.close()
 
 if __name__ == "__main__":
     main()