diff --git a/CHANGELOG b/CHANGELOG index 1e4cd82..2eaa2e4 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -5,6 +5,7 @@ List of things before release TODO: man pages for netcache, ansicat, opnk REGR: restore gemini certificate code REGR: implement grep +REGR: avoid asking for user input while in non-interactive mode This is an an experimental and unstable release. Lot of breakages are expected. Wait for 2.1 if you are not willing to do testing/bug reporting. - Licence has been changed to AGPL for ideological reasons @@ -12,8 +13,8 @@ Wait for 2.1 if you are not willing to do testing/bug reporting. - New command-line tool: "ansicat" - New command-line tool: "opnk" - URL do not default anymore to "gemini://" if not protocol are indicated. -- Images of html files are now always downloaded with the html (slower but better experience) -- Reading position is now saved for the whole session +- Images of html files are now downloaded with the html (slower but better experience) +- Reading position is saved for the whole session - Gopher-only: we don’t support naming a page after the name of the incoming link ## 1.10 - July 31st 2023 diff --git a/ansicat.py b/ansicat.py index b773e75..d6fa851 100755 --- a/ansicat.py +++ b/ansicat.py @@ -1131,7 +1131,9 @@ _FORMAT_RENDERERS = { } def get_mime(path): #Beware, this one is really a shaddy ad-hoc function - if path.startswith("mailto:"): + if not path: + return None + elif path.startswith("mailto:"): mime = "mailto" elif os.path.isdir(path): mime = "Local Folder" @@ -1165,6 +1167,8 @@ def get_mime(path): return mime def renderer_from_file(path,url=None): + if not path: + return None mime = get_mime(path) if not url: url = path diff --git a/netcache.py b/netcache.py index 870ed8c..086ae57 100755 --- a/netcache.py +++ b/netcache.py @@ -192,7 +192,7 @@ def get_cache_path(url): # Now, we have a partial path. Let’s make it full path. 
if local: cache_path = path - else: + elif scheme and host: cache_path = os.path.expanduser(_CACHE_PATH + scheme + "/" + host + path) #There’s an OS limitation of 260 characters per path. #We will thus cut the path enough to add the index afterward @@ -222,6 +222,10 @@ def get_cache_path(url): #and we try to access folder if os.path.isdir(cache_path): cache_path += "/" + index + else: + #URL is missing either a supported scheme or a valid host + #print("Error: %s is not a supported url"%url) + return None if len(cache_path) > 259: print("Path is too long. This is an OS limitation.\n\n") print(url) @@ -881,6 +885,7 @@ def _fetch_gemini(url,timeout=DEFAULT_TIMEOUT,**kwargs): if status == "11": user_input = getpass.getpass("> ") else: + #TODO:FIXME we should not ask for user input while in non-interactive mode user_input = input("> ") return _fetch_gemini(query(user_input)) # Redirects diff --git a/offpunk.py b/offpunk.py index 5fe38e4..90b3233 100755 --- a/offpunk.py +++ b/offpunk.py @@ -1685,15 +1685,17 @@ Argument : duration of cache validity (in seconds).""" #we should only savetotour at the first level of recursion # The code for this was removed so, currently, we savetotour # at every level of recursion. - links = self.get_renderer(url).get_links(mode="links_only") - subcount = [0,len(links)] - d = depth - 1 - for k in links: - #recursive call (validity is always 0 in recursion) - substri = strin + " -->" - subcount[0] += 1 - fetch_url(k,depth=d,validity=0,savetotour=savetotour,\ - count=subcount,strin=substri) + r = self.get_renderer(url) + if r: + links = r.get_links(mode="links_only") + subcount = [0,len(links)] + d = depth - 1 + for k in links: + #recursive call (validity is always 0 in recursion) + substri = strin + " -->" + subcount[0] += 1 + fetch_url(k,depth=d,validity=0,savetotour=savetotour,\ + count=subcount,strin=substri) def fetch_list(list,validity=0,depth=1,tourandremove=False,tourchildren=False): links = self.list_get_links(list) end = len(links)