-
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathscraper.py
78 lines (69 loc) · 2.37 KB
/
scraper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup, Tag
# Functions taken from https://www.pybloggers.com/2018/01/practical-introduction-to-web-scraping-in-python/
def is_good_response(resp):
    """
    Returns True if the response seems to be HTML, False otherwise.

    A response is "good" when its status is 200 and its Content-Type
    header mentions 'html'. A missing Content-Type header is treated
    as not-HTML instead of raising KeyError (the original indexed the
    headers dict directly, so a header-less response crashed; the
    `is not None` check came after `.lower()` and could never fire).
    """
    # .get() with a default makes a missing header yield False, not KeyError.
    content_type = resp.headers.get('Content-Type', '').lower()
    return (resp.status_code == 200
            and 'html' in content_type)
def log_error(e):
    """Report an error.

    For now this simply writes the error to stdout; replace the body
    with a call into a real logging backend when one is available.
    """
    print(e)
def http_get(url):
    """Fetch `url` with an HTTP GET request.

    Returns the raw body (bytes) when the server answers with an
    HTML-ish response (see is_good_response); returns None for any
    other content type or when the request itself fails.
    """
    try:
        # closing() guarantees the streamed connection is released
        # whether or not we read the body.
        with closing(get(url, stream=True)) as resp:
            return resp.content if is_good_response(resp) else None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def parse_and_print(element):
    """Print the text of `element` if it is a heading (h1-h3) or paragraph.

    Any other tag — or a node with no matching `.name` — is silently
    ignored. The parameter was renamed from `object` to avoid shadowing
    the builtin; all call sites pass it positionally.
    """
    # Membership test replaces the original chain of `or` comparisons.
    if element.name in ('h1', 'h2', 'h3', 'p'):
        print(element.text)
# --- script body --------------------------------------------------------
# Fetch the page, then walk every <h1> and its following siblings,
# printing headings/paragraphs (descending one level into <div>s).
raw_html = http_get('http://www.elderek.com/about.html')
print("-----------")
if raw_html is None:
    # http_get returns None on a request error or non-HTML response;
    # the original fed that None straight into BeautifulSoup and crashed.
    log_error('Failed to retrieve page content')
else:
    # Parse once (the original parsed the same document twice and never
    # used the first tree).
    soup = BeautifulSoup(raw_html, 'html.parser')
    for toplevel in soup.find_all('h1'):
        parse_and_print(toplevel)
        for sibling in toplevel.next_siblings:
            parse_and_print(sibling)
            if sibling.name == 'div':
                # find_all() is the modern name for findChildren().
                for child in sibling.find_all():
                    parse_and_print(child)