forked from hungchun0201/NTUclassCrawler
-
Notifications
You must be signed in to change notification settings - Fork 1
/
crawl.py
248 lines (216 loc) · 10.2 KB
/
crawl.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
import requests
from bs4 import BeautifulSoup
import re
import pandas as pd
import time
import argparse
import ast
class Crawler():
    """Crawler for the NTU classroom-usage board (上課教室看板).

    Builds a mapping from session start times to NTU period codes,
    then parses the command-line options into ``self.args``.
    """

    def __init__(self):
        """Initialise the period lookup table and parse CLI arguments."""
        # NTU period codes: "0"-"10" for day sessions, "A"-"D" for evening.
        start_times = ["7:10", "8:10", "9:10", "10:20", "11:20",
                       "12:20", "13:20", "14:20", "15:30", "16:30",
                       "17:30", "18:25", "19:20", "20:15", "21:10"]
        period_codes = ["0", "1", "2", "3", "4",
                        "5", "6", "7", "8", "9",
                        "10", "A", "B", "C", "D"]
        self.periodDict = dict(zip(start_times, period_codes))
        # Ordered start times; used to expand a "start~end" range into codes.
        self.periodKey = list(self.periodDict.keys())
        self.parser()
def parser(self):
def parseOptArgs(str):
if str == None:
return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key, val = p, 1
opts[key] = val
return opts
parser = argparse.ArgumentParser(prog='python3 crawl.py', description='''
This is a program that can crawl data of classes in next semester from
"classroom management system(上課教室看板)". With this program, you can arrange your classes before they come out at NTU online. While the information is not quite
complete on the website, it is still a useful and helpful tool if you want to
organize the curriculum for next semester in advance. The reference link is
http://gra206.aca.ntu.edu.tw/classrm/index.php/acarm/webcr-use1-new.
''', epilog="Designed by Hung-Chun,Lin.", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--semester", help="Select the semester you want to query", default="1101")
parser.add_argument(
"--delay-time", help="Set the delay time between each request", type=float, default=0, dest="delay")
parser.add_argument(
"-p", "--page", help="Assign the maximum page of each day.", type=int)
parser.add_argument(
"-s", "--save", help='''Store the result. You can specify your filename.
Please use .xls or .xlsx as filename extension.''', const="result.xls", action="store", nargs="?")
# parser.add_argument(
# "-s", "--save", help='''Store the result. You can specify your filename.
# Please use .xls or .xlsx as filename extension.''', const="result.csv", action="store", nargs="?")
parser.add_argument(
"-b", help='''Specify the building you want to query.
If the building belongs to a college(ex:College of Electrical Engineering and Computer Science),
use the code of that college(ex:9),or simply type the name of the building(ex:博雅).
The percentage sign means search for all buildings.''', default=9, dest="building",
choices=["1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
"B", "共同", "普通", "新生", "綜合", "博雅", "%"]
)
parser.add_argument(
"--search-opt", dest="searchOpt", help='''
Comma separated values to specifiy search options e.g. "Title=積體電路,Classroom=電二"
The avaliable args include:
"Id": Curriculum Identity Number
"Class": The class number. If the course is teached by only one teacher, it is set to 00
"Title": Course title
"Instructor": Teacher name
"Classroom": Schedule Classroom
"Time": The time of course
For example, if you type "--search-opt Title=積體電路,Classroom=電二", you may get the following result:
Id Class Title Instructor Classroom Time
0 943U0010 00 積體電路測試 李建模 電二146 二2,3,4
1 921U9590 00 電力電子與積體電路控制 陳景然 電二225 二7,8,9
2 943U0120 00 射頻積體電路設計 陳怡然 電二104 三2,3,4
3 90140500 00 積體電路設計 盧奕璋 電二229 三7,8,9
4 942U0120 00 微波積體電路專題 林坤佑 電二101 四7,8,9
'''
)
args = parser.parse_args()
args.searchOpt = parseOptArgs(args.searchOpt)
self.args = args
def transformTime(self, time):
day = time[1]
periods = time[4:]
periods = periods.split('~')
if(periods[0] == periods[1]): # only one session
return day+self.periodDict[periods[0]]
else: # more than one session
return_str = ""
start = self.periodKey.index(periods[0])
end = self.periodKey.index(periods[1])+1
return_str = ','.join(self.periodDict[k]
for k in self.periodKey[start:end])
return day+return_str
def getMaximumPage(self):
doc = requests.get(
'http://gra206.aca.ntu.edu.tw/classrm/index.php/acarm/webcr-use1-new?Type=1&page={}&SYearDDL={}&BuildingDDL={}&Week={}&Capacity=1&SelectButton=%E6%9F%A5%E8%A9%A2'
.format(1, self.args.semester, self.args.building, 1))
doc.encoding = 'UTF-8'
doc = doc.text
soup = BeautifulSoup(doc, 'html.parser')
table = soup.find(id="ClassTimeGV")
last_row = table.find_all("tr")[-1].find("td")
# print(last_row.findChildren())
self.args.page = len(last_row.findChildren())
def checkSearchOpts(self, dict):
if(not self.args.searchOpt):
return True
else:
for key in self.args.searchOpt:
if(self.args.searchOpt[key] not in dict[key]):
return False
else:
print(dict)
return True
def crawl(self):
class_info = []
if(not self.args.page):
self.getMaximumPage()
for week in range(1, 6):
for page in range(1, self.args.page+1):
doc = requests.get(
'http://gra206.aca.ntu.edu.tw/classrm/index.php/acarm/webcr-use1-new',
params={
'Type': '1',
'page': str(page),
'SYearDDL': self.args.semester,
'BuildingDDL': self.args.building,
'Week': str(week),
'Capacity': '1',
'SelectButton': '%E6%9F%A5%E8%A9%A2'
})
doc.encoding = 'UTF-8'
doc = doc.text
soup = BeautifulSoup(doc, 'lxml')
script = soup.select("#ContentPlaceHolder1 > script")[0]
# All course data in this page can be found in varaible timeDT
map_search = re.search('timeDT\s*=\s*(.*?}])\s*;', str(script))
# Convert to array from string
course_info = ast.literal_eval(map_search[1])
for classroom in course_info:
if(len(classroom.keys())==2):
continue
Sessions = list(classroom.keys())
Sessions.remove("Item")
Sessions.remove("Msg")
for session in Sessions:
course = classroom[session]["Info"][0]
# Add class number.
# some class is instructed by only one prof,so there is no class number.
if(course['cr_clas'] == ''):
course['cr_clas'] = "00"
# else:
# # bad course
# continue
dict = {
"Id": course['cr_cono'], # Curriculum Identity Number
"Class": course['cr_clas'],
"Title": course['cr_cnam'], # Course title
"Instructor": course['cr_tenam'],
"Classroom": course['cr_no'], # Schedule Classroom
"Time": course['cr_time'],
}
dict["Time"] = self.transformTime(dict["Time"])
if(not self.checkSearchOpts(dict)):
continue
# check whether is the same class
endloop = False
for old_dict in class_info:
if(old_dict["Id"] == dict["Id"] and old_dict["Class"] == dict["Class"]):
endloop = True
if(dict["Time"] in old_dict["Time"]):
break
else:
old_dict["Time"] = old_dict["Time"] + \
' '+dict["Time"]
break
if(endloop):
continue
class_info.append(dict)
print("========== Got", len(
class_info), "courses information, until Page", page, "and Day", week, "==========")
time.sleep(self.args.delay)
select_df = pd.DataFrame(class_info)
if(not select_df.empty):
select_df["Id"] = select_df["Id"].map(lambda x: '%-8s' % x)
select_df["Class"] = select_df["Class"].map(lambda x: '%-2s' % x)
select_df["Title"] = select_df["Title"].map(
lambda x: x.ljust(13, chr(12288)))
select_df["Instructor"] = select_df["Instructor"].map(
lambda x: x.ljust(4, chr(12288)))
select_df["Classroom"] = select_df["Classroom"].map(
lambda x: '{0:{1}<6}'.format(x, chr(12288)))
select_df["Time"] = select_df["Time"].map(
lambda x: '{0:{1}<8}'.format(x, chr(12288)))
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(select_df)
if(self.args.save):
try:
select_df.to_excel(self.args.save)
except:
print("Bad Filename!!! \nWarning:You probably did not install openpyxl first, type \"pip install openpyxl\" to install the package.")
return
if __name__ == '__main__':
    # Entry point: constructing the crawler parses argv; then crawl.
    Crawler().crawl()