"refactoring", full series downloads, other stuff

This commit is contained in:
nihilazo 2018-12-23 16:22:49 +00:00
parent 18fcdb5291
commit 3ec002c8eb
2 changed files with 78 additions and 39 deletions

58
main.py

@@ -1,47 +1,27 @@
#!/usr/bin/python
import requests
import mangadex
import argparse
import shutil
import zipfile
import os
from concurrent.futures import ThreadPoolExecutor
def save_page(page):
    print("Downloading page " + str(page['index'] + 1))
    response = requests.get(page['url'], stream=True)
    filetitle = "tmp/" + str(page['index']) + "." + page['url'].split('.')[-1:][0]
    with open(filetitle, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    del response
argparser = argparse.ArgumentParser()
argparser.add_argument("chapter")
argparser.add_argument("url", help="the manga URL. Can be a mangadex chapter (/chapter/) or manga series (/title/) URL.")
argparser.add_argument("-l","--language", help="the language code to download chapters in. Only relevent to series URLs.", default=None)
argparser.add_argument("-c","--chapterrange", help="the range of chapters to download, in \"start,end\" format. Only relevent to manga series URLs.", default=None)
argparser.add_argument("-g","--group", help="the transation group to download chapters from. This is a group ID not a group name. Only relevent to manga series URLs.", default=None)
args = argparser.parse_args()
chapter_api_url = "https://mangadex.org/api/chapter/" + args.chapter.split("/")[-1:][0]
r = requests.get(chapter_api_url)
data = r.json()
split_url = args.url.split("/")
pages = []
if args.chapterrange != None:
    csplit = args.chapterrange.split(',')
    crange = range(int(csplit[0]), int(csplit[1]) + 1)
else:
    crange = None
for page in range(len(data['page_array'])):
    if data['server'] == "/data/":
        page_url = "https://mangadex.org/data/" + data['hash'] + "/" + data['page_array'][page]
    else:
        page_url = data['server'] + data['hash'] + "/" + data['page_array'][page]
    pages.append({
        "url": page_url,
        "index": page
    })
print("DOWNLOADING CHAPTER")
if not os.path.exists("tmp"):
os.makedirs("tmp")
with ThreadPoolExecutor() as pool:
pool.map(save_page,pages)
print("CREATING CBZ")
filename = "Ch. {} - {}".format(data['chapter'],data['title'])
shutil.make_archive(filename, "zip", "tmp")
shutil.move(filename + ".zip", filename + ".cbz")
shutil.rmtree("tmp")
print("DONE CREATING CBZ")
for t in range(len(split_url)):
    if split_url[t] == "chapter":
        mangadex.save_chapter(split_url[t+1])
    if split_url[t] == "title":
        g = mangadex.get_chapters(split_url[t+1], args.language, args.group, crange)
        for cid in g:
            print("DOWNLOADING CHAPTER: " + cid)
            mangadex.save_chapter(cid)
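
For context, the refactored entry point now takes a full MangaDex URL instead of a bare chapter ID and dispatches to the new mangadex module. A hedged usage sketch, where the URLs, IDs, and language code are placeholders for illustration rather than values from this commit:

python main.py https://mangadex.org/chapter/123456
python main.py https://mangadex.org/title/7139 --language gb --chapterrange 1,10

The first form downloads a single chapter; the second walks the series listing, filters it by language, chapter range, and group, and saves each matching chapter as its own .cbz file.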

59
mangadex.py Normal file

@@ -0,0 +1,59 @@
import requests
import shutil
import zipfile
import os
from concurrent.futures import ThreadPoolExecutor

def get_chapters(manga_id, language, group, chapter_range):
    api_url = "https://mangadex.org/api/manga/" + manga_id
    r = requests.get(api_url)
    data = r.json()
    chapters = data['chapter'].items()
    if chapter_range != None:
        ch_range = [str(x) for x in chapter_range]
    if language != None:
        chapters = [x for x in chapters if x[1]['lang_code'] == language]
    if chapter_range != None:
        chapters = [x for x in chapters if x[1]['chapter'] in ch_range]
    if group != None:
        chapters = [x for x in chapters if (x[1]['group_id'] == group) or (x[1]['group_id_2'] == group) or (x[1]['group_id_3'] == group)]
    ids = [x[0] for x in chapters]
    return ids

def save_page(page):
    print("Downloading page " + str(page['index'] + 1))
    response = requests.get(page['url'], stream=True)
    filetitle = "tmp/" + str(page['index']) + "." + page['url'].split('.')[-1:][0]
    with open(filetitle, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    del response

def save_chapter(chapter_id):
    api_url = "https://mangadex.org/api/chapter/" + chapter_id
    r = requests.get(api_url)
    data = r.json()
    pages = []
    for page in range(len(data['page_array'])):
        if data['server'] == "/data/":
            page_url = "https://mangadex.org/data/" + data['hash'] + "/" + data['page_array'][page]
        else:
            page_url = data['server'] + data['hash'] + "/" + data['page_array'][page]
        pages.append({
            "url": page_url,
            "index": page
        })
    if not os.path.exists("tmp"):
        os.makedirs("tmp")
    with ThreadPoolExecutor() as pool:
        pool.map(save_page, pages)
    filename = "Ch. {} - {}".format(data['chapter'], data['title'])
    shutil.make_archive(filename, "zip", "tmp")
    shutil.move(filename + ".zip", filename + ".cbz")
    shutil.rmtree("tmp")