-
Notifications
You must be signed in to change notification settings - Fork 0
/
scrapper.py
49 lines (30 loc) · 1.13 KB
/
scrapper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import requests
from bs4 import BeautifulSoup
import pprint
# URLs for Hacker News front page (page 1) and page 2; scraped below.
url1 = 'https://news.ycombinator.com/news'
url2 = 'https://news.ycombinator.com/news?p=2'
def scrape(url, min_points=99):
    """Scrape one Hacker News page and return high-scoring stories.

    Args:
        url: URL of the HN listing page to fetch.
        min_points: keep only stories with strictly more points than this
            (defaults to 99, matching the original hard-coded benchmark).

    Returns:
        List of {'title', 'votes', 'link'} dicts, sorted by votes descending.
    """
    # BUG FIX: the original fetched `url1` regardless of the argument,
    # so scrape(url2) silently returned page-1 results again.
    res = requests.get(url)
    parse = BeautifulSoup(res.text, 'html.parser')
    links = parse.select('.titleline > a')
    subtexts = parse.select('.subtext')
    hn = []
    for index, item in enumerate(links):
        title = item.getText()
        href = item.get('href', None)
        vote = subtexts[index].select('.score')
        # Some rows (e.g. job postings) have no score element; skip them.
        if vote:
            points = int(vote[0].getText().replace(' points', ''))
            if points > min_points:
                hn.append({'title': title, 'votes': points, 'link': href})
    return sorted_by_votes(hn)
def sorted_by_votes(news_links):
    """Return the given story dicts ordered by their 'votes', highest first."""
    def vote_count(story):
        return story['votes']
    return sorted(news_links, key=vote_count, reverse=True)
# Scrape each listed page and print the combined results.
# Add more URLs to this tuple to scrape additional pages.
scrapped_pages = []
for page_url in (url1, url2):
    scrapped_pages.extend(scrape(page_url))
pprint.pprint(scrapped_pages)