import argparse
import sqlite3

import requests
from bs4 import BeautifulSoup

def parse_args(argv=None):
    """Parse command-line options for the scraper.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
              case argparse falls back to sys.argv[1:] — identical to the
              previous behavior, but an explicit list keeps this testable.

    Returns:
        argparse.Namespace with `output` (str, path of the SQLite file,
        default "phrases.db") and `version` (bool, print version and exit).
    """
    parser = argparse.ArgumentParser(
        description="Generate database of Latin famous phrases from Wikipedia")
    parser.add_argument("-o", "--output",
                        default="phrases.db",
                        help="set custom output file location")
    parser.add_argument("-v", "--version",
                        action="store_true",
                        help="print script version")
    return parser.parse_args(argv)

def get_html(url):
    """Download *url* and return its HTML parsed into a BeautifulSoup tree."""
    print("downloading webpage")
    page = requests.get(url)
    return BeautifulSoup(page.content, "html.parser")

def prep_database():
    """(Re)create the `phrases` table, dropping any previous copy.

    Relies on the module-level cursor `c` opened in the __main__ block.
    """
    print("prepping database")
    c.execute("DROP TABLE IF EXISTS phrases")
    # Schema mirrors the INSERT in fill_database: id, latin, english,
    # notes, plus the pre-computed character length of the Latin phrase.
    c.execute("""CREATE TABLE phrases
                 (id INTEGER PRIMARY KEY,
                  latin TEXT,
                  english TEXT,
                  notes TEXT,
                  length INTEGER)""")

def fill_database(list_table):
    """Insert every phrase row from the parsed wikitables into `phrases`.

    Args:
        list_table: iterable of BeautifulSoup <table> elements, as returned
                    by get_tables().

    Relies on the module-level cursor `c` and connection `conn`.
    """
    i = 0  # running phrase id, used as the primary key
    print("iterating through tables")
    for table in list_table:
        for row in table.tbody.find_all("tr", recursive=False):
            cell = row.find_all("td", recursive=False)
            if len(cell) < 3:
                # Header/separator rows have <th> or too few <td> cells.
                continue
            latin = cell[0].get_text(" ", strip=True).rstrip()
            english = cell[1].get_text(" ", strip=True).rstrip()
            notes = cell[2].get_text(" ", strip=True).rstrip()

            c.execute("""INSERT INTO phrases
                         (id, latin, english, notes, length)
                         VALUES(?, ?, ?, ?, ?)""",
                      (i, latin, english, notes, len(latin)))
            i += 1
    # One commit for the whole batch: per-row commits force a disk sync
    # per phrase and are dramatically slower.
    conn.commit()

def get_tables():
    """Return all wikitable elements from the pinned revision of the
    Wikipedia "List of Latin phrases (full)" page."""
    # Built via implicit string concatenation: the previous triple-quoted
    # form embedded a literal newline and leading spaces inside the URL,
    # producing an invalid request target.
    url = ("https://en.wikipedia.org/w/index.php?"
           "title=List_of_Latin_phrases_(full)&oldid=986793908")
    return get_html(url).find_all("table", attrs={"class": "wikitable"})

def main():
    """Entry point: print the script version, or scrape Wikipedia and
    populate the database (default action).

    Relies on the module-level `args` and `version` set in __main__.
    """
    if args.version:
        print(version)
        return
    prep_database()
    fill_database(get_tables())

if __name__ == "__main__":
    version = "phrases extract.py 1.0.2"
    args = parse_args()
    # Connection/cursor are module globals read by prep_database and
    # fill_database.
    conn = sqlite3.connect(args.output)
    c = conn.cursor()
    # NOTE(review): no main() call was visible at the end of the chunk —
    # restored here so the script actually runs; confirm against full file.
    main()
    conn.close()