diff --git a/extract.py b/extract.py
index 322a3020c3eccb18c9ad5c29b8ff9845f81771b8..4a0a6b00cb3b44a01e3b02e54506dcd6197379b7 100755
@@ -1,56 +1,82 @@
 #!/usr/bin/env python3
-# Extract Latin famous phrases from wikipedia
-# Armaan Bhojwani 2020
+# Extract Latin famous phrases from Wikipedia
+# Armaan Bhojwani 2021
 
-from bs4 import BeautifulSoup
+import argparse
+import sqlite3
 import requests
-import csv
+from bs4 import BeautifulSoup
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description="Generate database of Latin famous phrases from Wikipedia")
+    parser.add_argument("-o", "--output",
+                        default="phrases.db",
+                        help="set custom output file location")
+    parser.add_argument("-v", "--version",
+                        action="version",
+                        version="phrases-extract 1.0.3")
+    return parser.parse_args()
+
+
+def get_html(url):
+    print("downloading webpage")
+    return BeautifulSoup(requests.get(url).content, "html.parser")
+
+
+def prep_database():
+    print("prepping database")
+    c.execute("DROP TABLE IF EXISTS phrases")
+    c.execute("""CREATE TABLE phrases(
+              id INTEGER,
+              latin TEXT,
+              english TEXT,
+              notes TEXT,
+              length INTEGER)""")
+
+
+def fill_database(list_table):
+    i = 0  # phrase id
+    print("iterating through tables")
+    for table in list_table:
+        for row in table.tbody.find_all("tr", recursive=False):
+            cell = row.find_all("td", recursive=False)
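+            # Only rows with at least three cells hold a phrase entry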
+            if len(cell) > 2:
+                print(i, end="\r")
+
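+                # get_text(" ", strip=True) flattens nested markup into plain text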
+                latin = (cell[0].get_text(" ", strip=True)).rstrip()
+                english = (cell[1].get_text(" ", strip=True)).rstrip()
+                notes = (cell[2].get_text(" ", strip=True)).rstrip()
+
+                c.execute("""INSERT INTO phrases
+                         (id, latin, english, notes, length)
+                         VALUES(?, ?, ?, ?, ?)""",
+                          (i, latin, english, notes, len(latin)))
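+                # Committing after every insert is slower, but a crash keeps the rows written so far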
+                conn.commit()
+            i += 1
+
+
+def get_tables():
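+    # oldid pins a specific revision of the article so later page edits cannot break the scraper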
+    url = ("""https://en.wikipedia.org/w/index.php?title=List_of_Latin_phrases_(
+          full)&oldid=986793908""")
+    return get_html(url).find_all("table", attrs={"class": "wikitable"})
+
 
 def main():
-    url = 'https://en.wikipedia.org/wiki/List_of_Latin_phrases_(full)'
-    response = requests.get(url)
-    html = response.content
-
-    soup = BeautifulSoup(html, "html.parser")
-    list_table = soup.find_all("table", attrs={"class":"wikitable"})
-    with open('phrases.csv', 'w') as f:
-        writer = csv.writer(f)
-
-        i = 0 # For the phrase id
-        # iterate through the tables in the page
-        for table in list_table:
-            for row in table.find_all("tr")[1:]:
-                cell = row.find_all("td")
-                rowc = []
-
-                # append phrase id
-                rowc.append(i)
-
-                # avoid out of bounds errors
-                if len(cell) == 2:
-                    lan = 2
-                else:
-                    lan = 3
-
-                # add cell content
-                for j in range (0, lan):
-                    content = cell[j]
-                    text=(content.get_text()).rstrip()
-                    rowc.append(text)
-
-                # append length of phrase
-                rowc.append(len(rowc[1]))
-                writer.writerow(rowc)
-                i = i + 1
-    f.close()
-
-    # Strip empty lines
-    with open('phrases.csv', 'r+') as f:
-        lines = f.readlines()
-        f.seek(0)
-        f.writelines(line for line in lines if line.strip())
-        f.truncate()
-    f.close()
+    prep_database()
+    fill_database(get_tables())
+
 
 if __name__ == "__main__":
+    args = parse_args()
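+    # conn and c are module-level, shared by the helper functions above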
+    conn = sqlite3.connect(args.output)
+    c = conn.cursor()
     main()
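
For a quick sanity check of the resulting database, something like the
following works (a minimal sketch, assuming the default phrases.db output
path from -o):

    import sqlite3

    conn = sqlite3.connect("phrases.db")
    for row in conn.execute(
            "SELECT id, latin, english FROM phrases ORDER BY length LIMIT 3"):
        print(row)
    conn.close()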