import csv

import requests
from bs4 import BeautifulSoup


def scrape_carrier(carrier_id):
    """Scrape the SMS registration page of one carrier."""
    print(f"Attempting Carrier ID: {carrier_id}")
    url = f"https://ai.fmcsa.dot.gov/SMS/Carrier/{carrier_id}/CarrierRegistration.aspx"
    page = requests.get(url, timeout=30)
    page.raise_for_status()  # fail fast instead of parsing an error page
    soup = BeautifulSoup(page.content, 'html.parser')
    # get cargo: only the checked <li> items are cargo this carrier hauls
    cargo_carried = soup.find('ul', class_='cargo')
    included_cargo_soups = cargo_carried.find_all('li', class_='checked')
    # drop the checkmark <span> (redundant with the 'checked' class on the <li>)
    for item in included_cargo_soups:
        item.find('span').decompose()
    included_cargo = [item.get_text(strip=True) for item in included_cargo_soups]
    # get vehicle types: each vehType header cell marks one row of the table
    vehicle_type_soups = soup.find_all('th', class_='vehType')
    vehicle_type_rows = [item.find_parent('tr') for item in vehicle_type_soups]
    vehicle_type_table = [
        [cell.get_text(strip=True) for cell in row.find_all(['th', 'td'])]
        for row in vehicle_type_rows
    ]
    return (included_cargo, vehicle_type_table)
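

# A minimal retry sketch, assuming transient network failures are worth a
# few attempts; scrape_carrier_with_retry, attempts, and delay are
# illustrative names, not part of the original module.
import time  # used only by the sketches below


def scrape_carrier_with_retry(carrier_id, attempts=3, delay=2.0):
    """Retry scrape_carrier on network errors (hypothetical helper)."""
    for attempt in range(attempts):
        try:
            return scrape_carrier(carrier_id)
        except requests.RequestException:
            if attempt == attempts - 1:
                raise  # out of retries; surface the error to the caller
            time.sleep(delay)  # back off briefly before trying again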


def parse_carrier_ids(fp):
    """Parse carrier IDs out of the census file to feed into the scraper."""
    CARRIER_ID_COLUMN_INDEX = 0
    with open(fp, encoding='windows-1252', newline='') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row
        ids = [row[CARRIER_ID_COLUMN_INDEX] for row in reader]
    return ids
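

# A header-based alternative sketch: this assumes the census file's ID
# column is named 'DOT_NUMBER', which is an unverified assumption about the
# file; the helper is illustrative, not part of the original module.
def parse_carrier_ids_by_header(fp, column='DOT_NUMBER'):
    """Read carrier IDs by column name instead of position (hypothetical helper)."""
    with open(fp, encoding='windows-1252', newline='') as csvfile:
        return [row[column] for row in csv.DictReader(csvfile)]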


def write_carrier_results(carrier_id, results):
    """Writes carrier information into joinable CSVs, keyed by carrier ID."""
    included_cargo, vehicle_type_table = results
    # one row per cargo type, so carrier.csv joins cleanly on carrier_id
    with open('data/carrier.csv', 'a', newline='') as carrier_file:
        csv.writer(carrier_file).writerows(
            [carrier_id, cargo] for cargo in included_cargo)
    # one row per scraped table row; writerows keeps each cell in its own column
    with open('data/carrier_vehicle.csv', 'a', newline='') as carrier_vehicle_file:
        csv.writer(carrier_vehicle_file).writerows(
            [carrier_id] + row for row in vehicle_type_table)
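

# One-time setup sketch: write header rows so the appended CSVs are
# self-describing. The column names are assumptions inferred from what
# write_carrier_results emits; the vehicle table's own columns vary by
# page, so only the join key is named there.
def init_output_files():
    """Write header rows to fresh output CSVs (hypothetical helper)."""
    with open('data/carrier.csv', 'w', newline='') as f:
        csv.writer(f).writerow(['carrier_id', 'cargo'])
    with open('data/carrier_vehicle.csv', 'w', newline='') as f:
        csv.writer(f).writerow(['carrier_id'])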


def main(fp):
    """Scrape every carrier listed in the census file."""
    ids = parse_carrier_ids(fp)
    for carrier_id in ids:
        results = scrape_carrier(carrier_id)
        write_carrier_results(carrier_id, results)
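

# A politer crawl-loop sketch, assuming the SMS site should not be hit
# back-to-back; the one-second pause is an arbitrary assumption, and this
# variant reuses the time import from the retry sketch above.
def main_throttled(fp, pause=1.0):
    """Like main, but pauses between requests (hypothetical variant)."""
    for carrier_id in parse_carrier_ids(fp):
        write_carrier_results(carrier_id, scrape_carrier(carrier_id))
        time.sleep(pause)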


if __name__ == "__main__":
    main('FMCSA_CENSUS1_2020Aug/FMCSA_CENSUS1_2020Aug.txt')