initial commit
commit 18fcdb5291

@@ -0,0 +1,4 @@
tmp
*.cbz
__pycache__
*.pyc

@@ -0,0 +1,47 @@
#!/usr/bin/python
import requests
import argparse
import shutil
import os
from concurrent.futures import ThreadPoolExecutor


def save_page(page):
    # Stream the image to tmp/<index>.<ext>, keeping the source file's extension.
    print("Downloading page " + str(page['index'] + 1))
    response = requests.get(page['url'], stream=True)
    filetitle = "tmp/" + str(page['index']) + "." + page['url'].split('.')[-1]
    with open(filetitle, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    del response


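# A single positional argument: either a full chapter URL or a bare chapter id.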
argparser = argparse.ArgumentParser()
argparser.add_argument("chapter")
args = argparser.parse_args()
chapter_api_url = "https://mangadex.org/api/chapter/" + args.chapter.split("/")[-1]
r = requests.get(chapter_api_url)
data = r.json()
pages = []
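# Build the full image URL for each page; a server value of "/data/"
# means the images are hosted on mangadex.org itself.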
for index, page in enumerate(data['page_array']):
    if data['server'] == "/data/":
        page_url = "https://mangadex.org/data/" + data['hash'] + "/" + page
    else:
        page_url = data['server'] + data['hash'] + "/" + page
    pages.append({
        "url": page_url,
        "index": index
    })
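# Download every page concurrently, then pack tmp/ into a .cbz
# (a plain zip archive under a different extension).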
print("DOWNLOADING CHAPTER")
if not os.path.exists("tmp"):
    os.makedirs("tmp")
with ThreadPoolExecutor() as pool:
    pool.map(save_page, pages)
print("CREATING CBZ")
filename = "Ch. {} - {}".format(data['chapter'],data['title'])
shutil.make_archive(filename, "zip", "tmp")
shutil.move(filename + ".zip", filename + ".cbz")
shutil.rmtree("tmp")
print("DONE CREATING CBZ")