# parser.py
# 2020 FALL EE208
# Copyright: Group5
import os
import re
import urllib.parse

import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse


class Parser(object):
    def __init__(self, _index, _html_folder):
        '''
        @self.index: name of the file storing the filename-to-URL mapping
        @self.html_folder: name of the folder holding the crawled HTML files
        @self.filename_url: dict mapping each filename to its URL
        '''
        self.index = _index
        self.html_folder = _html_folder
        self.filename_url = dict()
        self.pic_count = 0

    ####################################
    # Initialize the filename_url dict #
    ####################################
    def initial_filename_url(self):
        with open(self.index, "r") as f:
            while True:
                line = f.readline()
                if line == '\n' or line == '':
                    break
                url_filename = line.split('\t')
                # skip malformed lines that do not split into url + filename
                if not len(url_filename) == 2:
                    continue
                url = url_filename[0]
                filename = url_filename[1].rstrip('\n')
                self.filename_url[filename] = url
        print("Reading text finished!")
    #################################
    # Get the images of a news page #
    #################################
    def get_img(self, url, soup, filename):
        # filename is unused here but kept for a uniform call signature
        news_img_list = []
        for i in soup.findAll('div', {'class': 'textBody'}):
            for img in i.findAll('img'):
                img_src = img.get('src')
                img_src = urllib.parse.urljoin(url, img_src)
                news_img_list.append(img_src)
        return news_img_list
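    # urljoin resolves a relative src against the page URL and leaves
    # absolute URLs unchanged, e.g.
    #   urllib.parse.urljoin('http://www.jhzhx.com/gjzq/xj/3121.html',
    #                        '../img/a.jpg')
    # yields 'http://www.jhzhx.com/gjzq/img/a.jpg'.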
    ####################################
    # Return the details of a news page #
    ####################################
    def news_detail(self, filename):
        '''
        @filename: name of the HTML file
        '''
        path = os.path.join(self.html_folder, filename)
        f = open(path, encoding='utf-8')
        contents = f.read()
        f.close()
        soup = BeautifulSoup(contents, features="html.parser")
        title = re.findall("<title>(.*)</title>", contents)[0]  # title
        date = re.findall('<time>(.*)</time>', contents)[0]  # date
        # body text
        text = ""
        for txt in soup.findAll('div', {'class': 'textBody'}):
            for t in txt.findAll('p'):
                text += t.text
        url = self.filename_url.get(filename)  # url
        # first image of the article, if any
        new_img_url = ""
        img_list = self.get_img(url, soup, filename)
        if len(img_list) > 0:
            new_img_url = img_list[0]
        # tips (empty string when the page has no tips block)
        tips = ""
        for i in soup.findAll('div', {'class': 'tips'}):
            tips = i.select('p')[0].text
        # site
        site = urlparse(url).netloc
        # related news; relative links are resolved against the page's own
        # URL rather than a hard-coded address
        related = []
        for div in soup.findAll('div', {'class': 'left1 mulu_bg'}):
            for i in div.findAll('li', {'class': 'even'}):
                related_news = dict()
                for a in i.findAll('a'):
                    new_url = a.get('href')
                    related_news['href'] = urllib.parse.urljoin(url, new_url)
                    for img in a.findAll('img'):
                        related_news['title'] = img.get('alt')
                        img_src = img.get('src')
                        related_news['img_src'] = urllib.parse.urljoin(url, img_src)
                related_news['text'] = i.select('p')[0].text
                related.append(related_news)
        return title, date, url, site, new_img_url, tips, text, related
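    # Each entry of `related` is a dict built from the markup above; a
    # hypothetical example of one entry:
    #   {'href': 'http://www.jhzhx.com/gjzq/xj/3122.html',
    #    'title': 'headline taken from the img alt text',
    #    'img_src': 'http://www.jhzhx.com/uploads/thumb.jpg',
    #    'text': 'first paragraph of the related item'}
    # 'title' and 'img_src' are only set when the <a> contains an <img>.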
    ###################
    # Save the images #
    ###################
    def save_all_img(self):
        folder = "image"
        if not os.path.exists(folder):  # create the folder if it is missing
            os.mkdir(folder)
        with open(self.index, "r") as f:
            while True:
                line = f.readline()
                if line == '\n' or line == '':
                    break
                url_filename = line.split('\t')
                if not len(url_filename) == 2:
                    continue
                filename = url_filename[1].rstrip('\n')
                self.save_img(folder, filename)
    def save_img(self, folder, filename):
        path = os.path.join(self.html_folder, filename)
        try:
            f = open(path, encoding='utf-8')
            contents = f.read()
            f.close()
            soup = BeautifulSoup(contents, features="html.parser")
            url = self.filename_url.get(filename)
            img_list = self.get_img(url, soup, filename)
            count = 0  # per-page image counter (a plain local is enough)
            for img in img_list:
                img_filename = filename + "%" + str(count) + '.jpg'
                f = open(os.path.join(folder, img_filename), 'wb')
                a = requests.get(img)
                f.write(a.content)  # write the image bytes to the file
                f.close()
                count += 1
                self.pic_count += 1
                print(self.pic_count, "pictures")
        except Exception:
            # skip pages that are missing, unreadable, or whose images
            # cannot be downloaded
            return
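    # Images are saved as "<html filename>%<n>.jpg"; e.g. a page stored as
    # 0.html with two images yields image/0.html%0.jpg and image/0.html%1.jpg.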


if __name__ == '__main__':
    xinwen_parser = Parser('sports_index.txt', 'xinwen_html')
    xinwen_parser.initial_filename_url()
    xinwen_parser.save_all_img()
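    # A minimal sketch of how news_detail could be used once the mapping is
    # loaded ('0.html' is a hypothetical filename from sports_index.txt):
    #   title, date, url, site, img, tips, text, related = \
    #       xinwen_parser.news_detail('0.html')
    #   print(title, date, site)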