git.armaanb.net Git - phrases.git/blobdiff - extract.py
add -o flag
[phrases.git] / extract.py
index 65f99143e2fca0bd24dc65ea8927633451540413..4e6c1f63544e13e9e434718350714ddd080b9382 100755 (executable)
@@ -2,22 +2,52 @@
 # Extract Latin famous phrases from wikipedia
 # Armaan Bhojwani 2020
 
-from bs4 import BeautifulSoup
+import argparse
+import sys
+import csv
 import requests
+from bs4 import BeautifulSoup
+
def main(args=sys.argv[1:]):
    """Scrape the Latin famous-phrases tables from Wikipedia and write a CSV.

    args: list of command-line arguments (defaults to sys.argv[1:]), so the
          function can also be driven programmatically or from tests.
    """
    # Argument parsing
    parser = argparse.ArgumentParser(
        description="Generate CSV file of Latin famous phrases from Wikipedia.")
    parser.add_argument("-o", "--output",
                        default="phrases.csv",
                        help="set custom output file location")
    # Bug fix: pass the function's `args` parameter through; previously
    # parse_args() re-read sys.argv and the parameter was silently ignored.
    opts = parser.parse_args(args)

    # Pinned oldid so the table layout cannot change underneath the scraper.
    url = ('https://en.wikipedia.org/w/index.php?title=List_of_Latin_phrases_('
           'full)&oldid=986793908')
    soup = BeautifulSoup(requests.get(url).content, "html.parser")

    # newline="" is the documented requirement for csv.writer file objects;
    # the writer's lineterminator then fully controls row endings.
    # utf-8 is explicit because the phrase text contains non-ASCII characters.
    with open(opts.output, 'w', newline="", encoding="utf-8") as f:
        writer = csv.writer(f, lineterminator="\n")

        # write header
        writer.writerow(['id', 'Latin', 'English', 'Notes', 'Length'])

        i = 0  # phrase id; incremented for EVERY row (incl. headers) to keep ids stable
        # iterate through the wikitables in the page
        for table in soup.find_all("table", attrs={"class": "wikitable"}):
            for row in table.tbody.find_all("tr", recursive=False):
                rowc = [i]  # phrase id first

                # add cell content (header rows have <th> only, so no <td>s)
                for content in row.find_all("td", recursive=False):
                    rowc.append(content.get_text(" ", strip=True).rstrip())

                if len(rowc) > 1:
                    rowc.append(len(rowc[1]))  # Length column: len of Latin text
                    writer.writerow(rowc)
                i = i + 1
    # No explicit f.close(): the with-statement already closed the file
    # (the old trailing f.close() was redundant).
 
-url = 'https://en.wikipedia.org/wiki/List_of_Latin_phrases_(full)'
-response = requests.get(url)
-html = response.content
-
-soup = BeautifulSoup(html, "html.parser")
-list_table = soup.find_all("table", attrs={"class":"wikitable"})
-f = open("phrases", "w")
-
-for table in list_table:
-    for row in table.find_all("tr")[1:]:
-        f.write("%" )
-        cell = row.find_all("td")
-        for content in cell:
-            text = content.get_text()
-            f.write("\n" + text)
-f.close()
+if __name__ == "__main__":
+    main()