From e01cae7bd4f6f81127aa54daedc031275b092a9c Mon Sep 17 00:00:00 2001 From: Xonshiz Date: Tue, 30 May 2017 22:06:53 +0530 Subject: [PATCH] Fix for #12 and code optimization Read the changelog for more information. --- Changelog.md | 4 +- anime_dl/AnimeDL.py | 16 +- anime_dl/__main__.py | 1 + anime_dl/animeName.py | 19 ++ anime_dl/sites/crunchyroll.py | 433 ++++++---------------------------- anime_dl/sites/funimation.py | 107 ++++++++- anime_dl/version.py | 2 +- docs/Changelog.md | 4 +- 8 files changed, 213 insertions(+), 373 deletions(-) create mode 100644 anime_dl/animeName.py diff --git a/Changelog.md b/Changelog.md index b264da6..5e58016 100644 --- a/Changelog.md +++ b/Changelog.md @@ -10,4 +10,6 @@ - Fix for [6](https://github.com/Xonshiz/anime-dl/issues/6) and Fix for [3](https://github.com/Xonshiz/anime-dl/issues/3) [2017.04.13] - Fix for #9 [2017.04.13] - Added `Verbose Logging` [2017.04.13] -- Fix for #11 [2017.04.21] \ No newline at end of file +- Fix for #11 [2017.04.21] +- Re-write code to remove unnecessary parts [2017.05.30] +- Fix for #12 [2017.05.30] \ No newline at end of file diff --git a/anime_dl/AnimeDL.py b/anime_dl/AnimeDL.py index f0a8588..6c6e5a1 100644 --- a/anime_dl/AnimeDL.py +++ b/anime_dl/AnimeDL.py @@ -11,6 +11,9 @@ from sys import exit +'''First, the honcho returns the website name and after that, the corresponding methods are called for a particular +website. I don't remember why I added an extra step, I really don't. Oh well, it's working, so let it work.''' + class AnimeDL(object): def __init__(self, url, username, password, resolution, language, skipper, logger): @@ -26,15 +29,26 @@ def __init__(self, url, username, password, resolution, language, skipper, logge sites.crunchyroll.CrunchyRoll( url=url[0], password=password, username=username, resolution=resolution, language=language, skipper=skipper, logger = logger) + elif website == "Funimation": + if not url[0] or not username[0] or not password[0]: + print("Please enter the required arguments. Run __main__.py --help") + exit() + else: + sites.funimation.Funimation(url[0], username, password, resolution, language) + def honcho(self, url): # print("Got url : %s" % url) # Verify that we have a sane url and return which website it belongs # to. + + # if there's not http:/, then netloc is empty. + # Gotta add the "if crunchyroll in url..." 
+
+        # print(url)
         domain = urlparse(url).netloc
         # print(domain)
         if domain in ["www.funimation.com", "funimation.com"]:
-            sites.funimation.Funimation()
+            return "Funimation"
         elif domain in ["www.crunchyroll.com", "crunchyroll.com"]:
             return "Crunchyroll"
 
diff --git a/anime_dl/__main__.py b/anime_dl/__main__.py
index 1795d14..72762b6 100644
--- a/anime_dl/__main__.py
+++ b/anime_dl/__main__.py
@@ -10,6 +10,7 @@
 # from anime_dl import AnimeDL
 from sys import exit
 from version import __version__
+from anime_dl import animeName
 import argparse
 import logging
 import platform
diff --git a/anime_dl/animeName.py b/anime_dl/animeName.py
new file mode 100644
index 0000000..fc6b3a0
--- /dev/null
+++ b/anime_dl/animeName.py
@@ -0,0 +1,19 @@
+import re
+import subprocess
+
+class animeName(object):
+
+    def nameEdit(self, animeName, episodeNumber, resolution):
+        rawName = str(animeName).title().strip().replace("Season ", "S") + " - " + str(episodeNumber).strip() + " [" + str(resolution) + "]"
+        fileName = str(re.sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(animeName))).title().strip().replace("Season ", "S") + " - " + str(episodeNumber).strip() + " [" + str(resolution) + "].mp4"
+
+        try:
+            MAX_PATH = int(subprocess.check_output(['getconf', 'PATH_MAX', '/']))
+            # print(MAX_PATH)
+        except Exception:
+            MAX_PATH = 4096
+
+        if len(fileName) > MAX_PATH:
+            fileName = fileName[:MAX_PATH]
+
+        return fileName
\ No newline at end of file
diff --git a/anime_dl/sites/crunchyroll.py b/anime_dl/sites/crunchyroll.py
index bb32d2b..1b6e9cd 100644
--- a/anime_dl/sites/crunchyroll.py
+++ b/anime_dl/sites/crunchyroll.py
@@ -19,6 +19,7 @@
 from shutil import move
 from sys import exit
 import logging
+from anime_dl import animeName
 
 '''This code Stinx. I'll write a better, faster and compact code when I get time after my exams or in mid. I literally have NO idea what I was thinking when I wrote this piece of code. 
@@ -143,404 +144,104 @@ def singleEpisode(self, url, cookies, token, resolution): sess = session() sess = create_scraper(sess) - if str(resolution).lower() in ['1080p', '1080', 'best', 'fhd']: - rtmpDL = "false" # Fix for #11 - logging.debug("Downloading Resolution : %s" % resolution) - print("Grabbing Links for 1080p Streams.") - infoURL = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=108&video_quality=80¤t_page=%s" % ( - video_id, url) - logging.debug("infoURL : %s" % infoURL) - xml_page = sess.get( - url=infoURL, headers=headers, cookies=cookies).text - # logging.debug("xml_page : %s" % xml_page) + rtmpDL = "false" # Fix for #11 + logging.debug("Downloading Resolution : %s" % resolution) - try: - m3u8_link_raw = str( - search(r'\(.*?)\<\/file\>', xml_page).group( - 1)).strip().replace("&", "&") - logging.debug("m3u8_link_raw : %s" % m3u8_link_raw) - if "mp4:" in m3u8_link_raw: - rtmpDL = "True" - hostLink = str( - search(r'\(.*?)\<\/host\>', xml_page).group( - 1)).strip().replace("&", "&") - - except Exception: - print("Error Found") - exit() - - anime_name = str( - search(r'\(.*?)\<\/series_title\>', xml_page) - .group(1)).strip().replace("’", "'").replace( - ":", " - ").replace("'", "'") - logging.debug("anime_name : %s" % anime_name) - - episode_number = str( - search(r'\(.*?)\<\/episode_number\>', - xml_page).group(1)).strip() - logging.debug("episode_number : %s" % episode_number) - - width = str( - search(r'\(.*?)\<\/width\>', xml_page).group( - 1)).strip() - logging.debug("width : %s" % width) - - height = str( - search(r'\(.*?)\<\/height\>', xml_page).group( - 1)).strip() - logging.debug("height : %s" % height) - - # print("m3u8_link : %s\nanime_name : %s\nepisode_number : %s\nwidth : %s\nheight : %s\n" % (m3u8_link_raw, anime_name, episode_number, width, height)) - # self.subFetcher(xml=str(xml_page), anime_name=anime_name, episode_number=episode_number) - file_name = sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(anime_name)) + " - " + str( - episode_number) + " [%sx%s].mp4" % (width, height) - logging.debug("file_name : %s" % file_name) - - # print("File Name : %s\n" % file_name) - try: - MAX_PATH = int(check_output(['getconf', 'PATH_MAX', '/'])) - # print(MAX_PATH) - except (Exception): - MAX_PATH = 4096 - - if len(file_name) > MAX_PATH: - file_name = file_name[:MAX_PATH] - - if not path.exists("Output"): - makedirs("Output") - - if path.isfile("Output/" + file_name): - print('[Anime-dl] File Exist! Skipping %s\n' % file_name) - pass - else: - self.subFetcher( - xml=str(xml_page), - anime_name=anime_name, - episode_number=episode_number) - # UNCOMMENT THIS LINE!!! 
- if rtmpDL == "True": - self.rtmpDump(host=hostLink, file=m3u8_link_raw, url=url, filename=file_name) - else: - m3u8_file = sess.get( - url=m3u8_link_raw, cookies=cookies, - headers=headers).text.splitlines()[2] - # print("M3u8 : %s" % m3u8_file) - ffmpeg_command = "ffmpeg -i \"%s\" -c copy -bsf:a aac_adtstoasc \"%s\"" % ( - m3u8_file, file_name) - logging.debug("ffmpeg_command : %s" % ffmpeg_command) - call(ffmpeg_command) - - for video_file in glob("*.mp4"): - try: - move(video_file, "Output") - except Exception as e: - print(str(e)) - pass - for sub_files in glob("*.ass"): - try: - move(sub_files, "Output") - except Exception as e: - print(str(e)) - pass - - if str(resolution).lower() in ['720p', '720', 'hd']: - rtmpDL = "false" # Fix for #11 - print("Grabbing Links for 720p Streams.") - infoURL = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=62¤t_page=%s" % ( - video_id, url) - xml_page = sess.get( - url=infoURL, headers=headers, cookies=cookies).text - # logging.debug("xml_page : %s" % xml_page) - - try: - m3u8_link_raw = str( - search(r'\(.*?)\<\/file\>', xml_page).group( - 1)).strip().replace("&", "&") - logging.debug("m3u8_link_raw : %s" % m3u8_link_raw) - if "mp4:" in m3u8_link_raw: - rtmpDL = "True" - hostLink = str( - search(r'\(.*?)\<\/host\>', xml_page).group( - 1)).strip().replace("&", "&") - - except Exception: - print("Error Found") - exit() - - anime_name = str( - search(r'\(.*?)\<\/series_title\>', xml_page) - .group(1)).strip().replace("’", "'").replace( - ":", " - ").replace("'", "'") - logging.debug("anime_name : %s" % anime_name) + if str(resolution).lower() in ['1080p', '1080', 'fhd', 'best']: + infoURL = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=108&video_quality=80¤t_page=%s" % (video_id, url) - episode_number = str( - search(r'\(.*?)\<\/episode_number\>', - xml_page).group(1)).strip() - logging.debug("episode_number : %s" % episode_number) + elif str(resolution).lower() in ['720p', '720', 'hd']: + infoURL = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=62¤t_page=%s" % (video_id, url) - width = str( - search(r'\(.*?)\<\/width\>', xml_page).group( - 1)).strip() - logging.debug("width : %s" % width) - - height = str( - search(r'\(.*?)\<\/height\>', xml_page).group( - 1)).strip() - logging.debug("height : %s" % height) - - # print("m3u8_link : %s\nanime_name : %s\nepisode_number : %s\nwidth : %s\nheight : %s\n" % (m3u8_link_raw, anime_name, episode_number, width, height)) - # self.subFetcher(xml=str(xml_page), anime_name=anime_name, episode_number=episode_number) - file_name = sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(anime_name)) + " - " + str( - episode_number) + " [%sx%s].mp4" % (width, height) - logging.debug("file_name : %s" % file_name) - - # print("File Name : %s\n" % file_name) - try: - MAX_PATH = int(check_output(['getconf', 'PATH_MAX', '/'])) - #print(MAX_PATH) - except (Exception): - MAX_PATH = 4096 - - if len(file_name) > MAX_PATH: - file_name = file_name[:MAX_PATH] - - if not path.exists("Output"): - makedirs("Output") - - if path.isfile("Output/" + file_name): - print('[Anime-dl] File Exist! Skipping %s\n' % file_name) - pass - else: - self.subFetcher( - xml=str(xml_page), - anime_name=anime_name, - episode_number=episode_number) - # UNCOMMENT THIS LINE!!! 
- if rtmpDL == "True": - self.rtmpDump(host = hostLink, file = m3u8_link_raw, url = url, filename = file_name) - else: - m3u8_file = sess.get( - url=m3u8_link_raw, cookies=cookies, - headers=headers).text.splitlines()[2] - # print("M3u8 : %s" % m3u8_file) - ffmpeg_command = "ffmpeg -i \"%s\" -c copy -bsf:a aac_adtstoasc \"%s\"" % ( - m3u8_file, file_name) - logging.debug("ffmpeg_command : %s" % ffmpeg_command) - call(ffmpeg_command) - - for video_file in glob("*.mp4"): - try: - move(video_file, "Output") - except Exception as e: - print(str(e)) - pass - for sub_files in glob("*.ass"): - try: - move(sub_files, "Output") - except Exception as e: - print(str(e)) - pass - - if str(resolution).lower() in ['480p', '480', 'sd']: - rtmpDL = "false" # Fix for #11. - print("Grabbing Links for 480p Streams.") + elif str(resolution).lower() in ['480p', '480', 'sd']: infoURL = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=61¤t_page=%s" % ( - video_id, url) - xml_page = sess.get( - url=infoURL, headers=headers, cookies=cookies).text - # logging.debug("xml_page : %s" % xml_page) - - try: - m3u8_link_raw = str( - search(r'\(.*?)\<\/file\>', xml_page).group( - 1)).strip().replace("&", "&") - logging.debug("m3u8_link_raw : %s" % m3u8_link_raw) - if "mp4:" in m3u8_link_raw: - rtmpDL = "True" - hostLink = str( - search(r'\(.*?)\<\/host\>', xml_page).group( - 1)).strip().replace("&", "&") - - except Exception: - print("Error Found") - exit() - - anime_name = str( - search(r'\(.*?)\<\/series_title\>', xml_page) - .group(1)).strip().replace("’", "'").replace( - ":", " - ").replace("'", "'") - logging.debug("anime_name : %s" % anime_name) - - episode_number = str( - search(r'\(.*?)\<\/episode_number\>', - xml_page).group(1)).strip() - logging.debug("episode_number : %s" % episode_number) - - width = str( - search(r'\(.*?)\<\/width\>', xml_page).group( - 1)).strip() - logging.debug("width : %s" % width) - - height = str( - search(r'\(.*?)\<\/height\>', xml_page).group( - 1)).strip() - logging.debug("height : %s" % height) - - # print("m3u8_link : %s\nanime_name : %s\nepisode_number : %s\nwidth : %s\nheight : %s\n" % (m3u8_link_raw, anime_name, episode_number, width, height)) - # self.subFetcher(xml=str(xml_page), anime_name=anime_name, episode_number=episode_number) - file_name = sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(anime_name)) + " - " + str( - episode_number) + " [%sx%s].mp4" % (width, height) - logging.debug("file_name : %s" % file_name) + video_id, url) + elif str(resolution).lower() in ['360p', '360', 'cancer']: + infoURL = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=60¤t_page=%s" % ( + video_id, url) - # print("File Name : %s\n" % file_name) - try: - MAX_PATH = int(check_output(['getconf', 'PATH_MAX', '/'])) - # print(MAX_PATH) - except (Exception): - MAX_PATH = 4096 + logging.debug("infoURL : %s" % infoURL) + # print(infoURL) - if len(file_name) > MAX_PATH: - file_name = file_name[:MAX_PATH] + xml_page = sess.get(url=infoURL, headers=headers, cookies=cookies).text + logging.debug("xml_page : %s" % xml_page.encode("utf-8")) - if not path.exists("Output"): - makedirs("Output") + try: + m3u8_link_raw = str(search(r'\(.*?)\<\/file\>', xml_page).group(1)).strip().replace("&", "&") + logging.debug("m3u8_link_raw : %s" % m3u8_link_raw) - if path.isfile("Output/" + file_name): - print('[Anime-dl] File Exist! 
Skipping %s\n' % file_name) - pass - else: - self.subFetcher( - xml=str(xml_page), - anime_name=anime_name, - episode_number=episode_number) - # UNCOMMENT THIS LINE!!! - if rtmpDL == "True": - self.rtmpDump(host=hostLink, file=m3u8_link_raw, url=url, filename=file_name) - else: - m3u8_file = sess.get( - url=m3u8_link_raw, cookies=cookies, - headers=headers).text.splitlines()[2] - # print("M3u8 : %s" % m3u8_file) - ffmpeg_command = "ffmpeg -i \"%s\" -c copy -bsf:a aac_adtstoasc \"%s\"" % ( - m3u8_file, file_name) - logging.debug("ffmpeg_command : %s" % ffmpeg_command) - call(ffmpeg_command) - - for video_file in glob("*.mp4"): - try: - move(video_file, "Output") - except Exception as e: - print(str(e)) - pass - for sub_files in glob("*.ass"): - try: - move(sub_files, "Output") - except Exception as e: - print(str(e)) - pass - - if str(resolution).lower() in ['360p', '360', 'mobile']: - rtmpDL = "false" # Fix for #11 - print("Grabbing Links for 360p Streams.") - infoURL = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=106&video_quality=60¤t_page=%s" % ( - video_id, url) - xml_page = sess.get( - url=infoURL, headers=headers, cookies=cookies).text - # logging.debug("xml_page : %s" % xml_page) + if "mp4:" in m3u8_link_raw: + rtmpDL = "True" + hostLink = str(search(r'\(.*?)\<\/host\>', xml_page).group(1)).strip().replace("&", "&") - try: - m3u8_link_raw = str( - search(r'\(.*?)\<\/file\>', xml_page).group( - 1)).strip().replace("&", "&") - logging.debug("m3u8_link_raw : %s" % m3u8_link_raw) - if "mp4:" in m3u8_link_raw: - rtmpDL = "True" - hostLink = str( - search(r'\(.*?)\<\/host\>', xml_page).group( - 1)).strip().replace("&", "&") - - except Exception: - print("Error Found") - exit() - - anime_name = str( + except Exception: + print("Error Found") + exit() + anime_name = str( search(r'\(.*?)\<\/series_title\>', xml_page) .group(1)).strip().replace("’", "'").replace( ":", " - ").replace("'", "'") - logging.debug("anime_name : %s" % anime_name) + logging.debug("anime_name : %s" % anime_name) - episode_number = str( + episode_number = str( search(r'\(.*?)\<\/episode_number\>', xml_page).group(1)).strip() - logging.debug("episode_number : %s" % episode_number) + logging.debug("episode_number : %s" % episode_number) - width = str( + width = str( search(r'\(.*?)\<\/width\>', xml_page).group( 1)).strip() - logging.debug("width : %s" % width) + logging.debug("width : %s" % width) - height = str( + height = str( search(r'\(.*?)\<\/height\>', xml_page).group( 1)).strip() - logging.debug("height : %s" % height) + logging.debug("height : %s" % height) - # print("m3u8_link : %s\nanime_name : %s\nepisode_number : %s\nwidth : %s\nheight : %s\n" % (m3u8_link_raw, anime_name, episode_number, width, height)) - # self.subFetcher(xml=str(xml_page), anime_name=anime_name, episode_number=episode_number) - file_name = sub(r'[^A-Za-z0-9\ \-\' \\]+', '', str(anime_name)) + " - " + str( - episode_number) + " [%sx%s].mp4" % (width, height) - logging.debug("file_name : %s" % file_name) + reso = str(width).strip() + "X" + str(height).strip() - # print("File Name : %s\n" % file_name) - try: - MAX_PATH = int(check_output(['getconf', 'PATH_MAX', '/'])) - # print(MAX_PATH) - except (Exception): - MAX_PATH = 4096 + file_name = animeName.animeName().nameEdit(animeName = anime_name, episodeNumber = episode_number, resolution = reso) + # print(file_name) - if len(file_name) > MAX_PATH: - file_name = file_name[:MAX_PATH] + logging.debug("file_name : %s" % file_name) - if not 
path.exists("Output"): - makedirs("Output") + if not path.exists("Output"): + makedirs("Output") - if path.isfile("Output/" + file_name): - print('[Anime-dl] File Exist! Skipping %s\n' % file_name) - pass - else: - self.subFetcher( + if path.isfile("Output/" + file_name): + print('[Anime-dl] File Exist! Skipping %s\n' % file_name) + pass + else: + self.subFetcher( xml=str(xml_page), anime_name=anime_name, episode_number=episode_number) - # UNCOMMENT THIS LINE!!! - if rtmpDL == "True": - self.rtmpDump(host=hostLink, file=m3u8_link_raw, url=url, filename=file_name) - else: - m3u8_file = sess.get( + # UNCOMMENT THIS LINE!!! + if rtmpDL == "True": + self.rtmpDump(host=hostLink, file=m3u8_link_raw, url=url, filename=file_name) + else: + m3u8_file = sess.get( url=m3u8_link_raw, cookies=cookies, headers=headers).text.splitlines()[2] - # print("M3u8 : %s" % m3u8_file) - ffmpeg_command = "ffmpeg -i \"%s\" -c copy -bsf:a aac_adtstoasc \"%s\"" % ( + # print("M3u8 : %s" % m3u8_file) + ffmpeg_command = "ffmpeg -i \"%s\" -c copy -bsf:a aac_adtstoasc \"%s\"" % ( m3u8_file, file_name) - logging.debug("ffmpeg_command : %s" % ffmpeg_command) - call(ffmpeg_command) - - for video_file in glob("*.mp4"): - try: - move(video_file, "Output") - except Exception as e: - print(str(e)) - pass - for sub_files in glob("*.ass"): - try: - move(sub_files, "Output") - except Exception as e: - print(str(e)) - pass - - print("Completed Downloading : %s" % anime_name) - - return (video_id, m3u8_link_raw, anime_name, episode_number, width, - height, file_name, cookies, token) + logging.debug("ffmpeg_command : %s" % ffmpeg_command) + call(ffmpeg_command) + + for video_file in glob("*.mp4"): + try: + move(video_file, "Output") + except Exception as e: + print(str(e)) + pass + for sub_files in glob("*.ass"): + try: + move(sub_files, "Output") + except Exception as e: + print(str(e)) + pass + def wholeShow(self, url, cookie, token, language, resolution, skipper): # print("Check my patreon for this : http://patreon.com/Xonshiz") diff --git a/anime_dl/sites/funimation.py b/anime_dl/sites/funimation.py index 6d963d5..0e7df69 100644 --- a/anime_dl/sites/funimation.py +++ b/anime_dl/sites/funimation.py @@ -1,8 +1,109 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +from cfscrape import create_scraper +from requests import session +from bs4 import BeautifulSoup +import re +from subprocess import call +from time import sleep + class Funimation(object): - def __init__(self): - print("Under development!") - exit() \ No newline at end of file + def __init__(self, url, username, password, resolution, language): + + self.cookies = self.login(userUserName = username, userPassword = password) + # print("Cookies : %s\n\n\nSource : \n%s" % (self.cookies, self.pageSource)) + self.singleEpisode(url, self.cookies, resolution, language) + + def login(self, userUserName, userPassword): + headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', + 'Territory': 'US' + + } + payload = {'username': '%s' % userUserName, 'password': '%s' % userPassword} + sess = session() + sess = create_scraper(sess) + + loginPost = sess.post(url='https://prod-api-funimationnow.dadcdigital.com/api/auth/login/', data=payload, + headers=headers) + + initialCookies = sess.cookies + + return initialCookies + + + def singleEpisode(self, url, userCookies, resolution, language): + headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/57.0.2987.133 Safari/537.36', + 'Territory': 'US' + + } + + sess = session() + sess = create_scraper(sess) + print("This lang : ", language) + + if language.lower() in ["japanese", "sub", "jpn"]: + finalUrl = str(url) + "?lang=english" + s = sess.get(finalUrl, headers=headers, cookies=userCookies) + + elif language.lower() in ["english", "dub", "eng"]: + finalUrl = str(url).replace("simulcast", "uncut") + "?lang=english" + print(finalUrl) + s = sess.get(finalUrl, headers=headers, cookies=userCookies) + print("Got this") + else: + s = sess.get(url + "?lang=english", headers=headers, cookies=userCookies) + + cookies = sess.cookies + + page_source = s.text.encode('utf-8') + htmlSource = str(BeautifulSoup(page_source, "lxml")) + + videoID = int(str(re.search('id\:\ \'(.*?)\'\,', htmlSource).group(1)).strip()) + seasonNumber = int(str(re.search('seasonNum: (.*?),', htmlSource).group(1)).strip()) + episodeNumber = int(str(re.search('episodeNum: (.*?),', htmlSource).group(1)).strip()) + showName = str( + re.search('KANE_customdimensions.showName\ \=\ \'(.*?)\'\;', htmlSource).group(1)).strip().replace("'", + "'").replace( + "&", "$") + fileName = str(showName) + " - " + str(episodeNumber) + ".mkv" + bDubNumber = int(str(re.search('"/player/(.*?)/\?bdub=0', htmlSource).group(1)).strip()) + print(videoID, seasonNumber, episodeNumber, showName, bDubNumber) + videoPlayerLink = "https://www.funimation.com/player/%s/?bdub=0" % bDubNumber + print(videoPlayerLink) + sleep(10) + headersNew = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', + 'Territory': 'US', + 'Referer' : '%s' % finalUrl + + } + playerSource = sess.get(videoPlayerLink, headers=headersNew).text + print(playerSource) + main_m3u8Link = str(re.search('"screenshots":(.*?)"],', playerSource).group(1)).strip().replace("[\"", "").replace("exp/", "") + print(main_m3u8Link) + try: + srtLink = str(re.search('"src":(.*?)\.srt"', playerSource).group(1)).strip().replace("[\"", "").replace("exp/", "") + print(srtLink) + except: + pass + + if resolution.lower() in ["1080p", "fhd", "1080"]: + m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer9.m3u8") + elif resolution.lower() in ["720p", "hd", "720"]: + m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer7.m3u8") + elif resolution.lower() in ["540p", "sd", "540"]: + m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer5.m3u8") + elif resolution.lower() in ["360p", "crap", "360"]: + m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer4.m3u8") + elif resolution.lower() in ["270p", "cancer", "270"]: + m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer2.m3u8") + elif resolution.lower() in ["234p", "killme", "234"]: + m3u8LinkFinal = main_m3u8Link.replace(".m3u8", "_Layer1.m3u8") + + print(m3u8LinkFinal) + ffmpegCommand = "ffmpeg -i \"%s\" -c copy \"%s\"" % (m3u8LinkFinal, fileName) + call(ffmpegCommand) diff --git a/anime_dl/version.py b/anime_dl/version.py index 88fbb1a..9d56a43 100644 --- a/anime_dl/version.py +++ b/anime_dl/version.py @@ -1,2 +1,2 @@ # Format : YY/MM/DD -__version__ = "2017.04.21" +__version__ = "2017.05.30" diff --git a/docs/Changelog.md b/docs/Changelog.md index b264da6..5e58016 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -10,4 +10,6 @@ - Fix for [6](https://github.com/Xonshiz/anime-dl/issues/6) and Fix for [3](https://github.com/Xonshiz/anime-dl/issues/3) [2017.04.13] - Fix for #9 [2017.04.13] - Added `Verbose Logging` [2017.04.13] -- Fix for #11 [2017.04.21] \ No 
newline at end of file +- Fix for #11 [2017.04.21] +- Re-write code to remove unnecessary parts [2017.05.30] +- Fix for #12 [2017.05.30] \ No newline at end of file
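
A note on the honcho() change in AnimeDL.py: it now returns a site name ("Crunchyroll" or "Funimation") that __init__ dispatches on, instead of constructing the Funimation object directly. The added comments point out that urlparse() leaves netloc empty when the URL has no scheme, and that a substring fallback ("if crunchyroll in url...") is still to be added. The sketch below only illustrates that idea; guess_website() and the scheme-less fallback branch are hypothetical, not code from the patch.

```python
# Hypothetical sketch of the dispatch idea, not code from the patch.
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

def guess_website(url):
    # urlparse() only fills .netloc when the URL carries a scheme, which is what
    # the "if there's not http://, then netloc is empty" comment refers to.
    domain = urlparse(url).netloc
    if not domain:
        # Fallback hinted at by the "if crunchyroll in url..." comment;
        # the patch itself does not implement this branch yet.
        if "crunchyroll.com" in url:
            return "Crunchyroll"
        if "funimation.com" in url:
            return "Funimation"
    if domain in ("www.funimation.com", "funimation.com"):
        return "Funimation"
    if domain in ("www.crunchyroll.com", "crunchyroll.com"):
        return "Crunchyroll"
    return None

print(guess_website("http://www.crunchyroll.com/some-show/episode-1"))  # Crunchyroll
print(guess_website("www.funimation.com/shows/some-show"))              # Funimation
```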
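
The new anime_dl/animeName.py centralises file naming: nameEdit() strips unsafe characters from the title, collapses "Season " to "S", appends the episode number and resolution, and clamps the result to PATH_MAX (falling back to 4096 where getconf is unavailable). A minimal usage sketch, assuming the package is importable; the title, episode number and resolution are made up.

```python
# Made-up inputs; assumes the anime_dl package is importable from the project root.
from anime_dl import animeName

name = animeName.animeName().nameEdit(
    animeName="Naruto: Season 1",   # ':' is stripped, "Season " becomes "S"
    episodeNumber="05",
    resolution="1280X720")
print(name)   # Naruto S1 - 05 [1280X720].mp4
```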
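
In crunchyroll.py the four copy-pasted resolution blocks collapse into one path where only the video_format/video_quality pair of the RpcApiVideoPlayer_GetStandardConfig request differs (108/80, 106/62, 106/61, 106/60 per the patch). The sketch below is my own table-driven restructuring of that choice, not the patch's code; it writes the query parameter as &current_page on the assumption that the patch's "¤t_page" is an HTML-entity mangling of it, and the media id and page URL are placeholders.

```python
# My own table-driven restructuring of the resolution choice; values are the
# video_format/video_quality pairs used in the patch. "&current_page" assumes
# the patch's "¤t_page" is an HTML-entity mangling of that parameter.
QUALITY_MAP = {
    "1080": ("108", "80"),   # '1080p', '1080', 'fhd', 'best' in the patch
    "720":  ("106", "62"),   # '720p', '720', 'hd'
    "480":  ("106", "61"),   # '480p', '480', 'sd'
    "360":  ("106", "60"),   # '360p', '360', 'cancer'
}

INFO_URL = ("http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig"
            "&media_id=%s&video_format=%s&video_quality=%s&current_page=%s")

def build_info_url(video_id, page_url, resolution):
    # Only the bare numeric forms are resolved here; the patch's if/elif chain
    # also accepts the alias strings listed above.
    video_format, video_quality = QUALITY_MAP[str(resolution).lower().rstrip("p")]
    return INFO_URL % (video_id, video_format, video_quality, page_url)

# Placeholder media id and page URL.
print(build_info_url("123456", "http://www.crunchyroll.com/some-show/episode-1", "720p"))
```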
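
The new Funimation downloader picks a rendition by rewriting the master playlist name with a _LayerN suffix (Layer9 for 1080p down to Layer1 for 234p). As written, an unrecognised resolution string would leave m3u8LinkFinal unassigned and the later print/ffmpeg call would raise a NameError, so here is a sketch with an explicit fallback; the layer numbers come from the patch, while the default choice and the example URL are mine.

```python
# Layer numbers are the ones used in the patch; the fallback to the 720p layer
# and the example URL are mine, added so an unknown resolution cannot leave the
# final link undefined.
LAYER_SUFFIX = {
    "1080": "_Layer9.m3u8",
    "720":  "_Layer7.m3u8",
    "540":  "_Layer5.m3u8",
    "360":  "_Layer4.m3u8",
    "270":  "_Layer2.m3u8",
    "234":  "_Layer1.m3u8",
}

def pick_rendition(master_m3u8, resolution):
    suffix = LAYER_SUFFIX.get(str(resolution).lower().rstrip("p"), "_Layer7.m3u8")
    return master_m3u8.replace(".m3u8", suffix)

print(pick_rendition("https://example.com/video/episode.m3u8", "540p"))
# https://example.com/video/episode_Layer5.m3u8
```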