import os
import shutil
import tempfile
from concurrent.futures import ThreadPoolExecutor

import requests

# Spoof a desktop browser so the MangaDex servers accept the requests.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36"}


def get_chapters(manga_id, language, group, chapter_range):
    """Return chapter IDs for a manga, optionally filtered by language, group, and chapter range."""
    api_url = "https://mangadex.org/api/manga/" + manga_id
    r = requests.get(api_url, headers=headers)
    data = r.json()
    chapters = data['chapter'].items()
    if language is not None:
        chapters = [x for x in chapters if x[1]['lang_code'] == language]
    if chapter_range is not None:
        # Chapter numbers come back from the API as strings.
        ch_range = [str(n) for n in chapter_range]
        chapters = [x for x in chapters if x[1]['chapter'] in ch_range]
    if group is not None:
        # A chapter can credit up to three scanlation groups; match any of them.
        chapters = [x for x in chapters
                    if group in (x[1]['group_id'], x[1]['group_id_2'], x[1]['group_id_3'])]
    return [x[0] for x in chapters]


def save_page(page):
    """Download a single page image into the chapter's temporary directory."""
    print("Downloading page " + str(page['index'] + 1))
    response = requests.get(page['url'], stream=True, headers=headers)
    # Zero-pad the index so pages sort correctly inside the archive.
    extension = page['url'].split('.')[-1]
    filetitle = os.path.join(page['dir'], str(page['index']).zfill(3) + "." + extension)
    with open(filetitle, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    del response


def save_chapter(chapter_id):
    """Download every page of a chapter and pack them into a .cbz archive."""
    pages_dir = tempfile.mkdtemp()
    api_url = "https://mangadex.org/api/chapter/" + chapter_id
    print(api_url)
    r = requests.get(api_url, headers=headers)
    data = r.json()
    pages = []
    for index, page in enumerate(data['page_array']):
        # The API may report the image server as a relative path; expand it to a full URL.
        if data['server'] == "/data/":
            page_url = "https://mangadex.org/data/" + data['hash'] + "/" + page
        else:
            page_url = data['server'] + data['hash'] + "/" + page
        pages.append({"url": page_url, "index": index, "dir": pages_dir})
    # Fetch pages concurrently.
    with ThreadPoolExecutor() as pool:
        pool.map(save_page, pages)
    filename = "Ch. {} - {}".format(data['chapter'], data['title'])
    # A .cbz is just a renamed .zip archive.
    shutil.make_archive(filename, "zip", pages_dir)
    shutil.move(filename + ".zip", filename + ".cbz")
    shutil.rmtree(pages_dir)
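

# Minimal usage sketch. The manga ID "12345" is hypothetical, and "gb" assumes
# the legacy MangaDex lang_code for English; substitute real values as needed.
if __name__ == "__main__":
    # Chapters 1-3 in English, from any group.
    chapter_ids = get_chapters("12345", language="gb", group=None, chapter_range=range(1, 4))
    for chapter_id in chapter_ids:
        save_chapter(chapter_id)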