Index: channels/peertube.py ================================================================== --- channels/peertube.py +++ channels/peertube.py @@ -127,16 +127,16 @@ # The /embed/ section of the url can sometimes be substituted with: # · /videos/watch/UUID # · /static/streaming-playlists/hls/UUID/master.m3u8 # · /static/webseed/UUID.mp4 # Though that's sometimes blocked / or not consistently supported on all instances. -# Which is why resoslve_urn does an extra /api/v1/videos/uuid lookup. +# Which is why resolve_urn does an extra /api/v1/videos/uuid lookup. # class peertube (ChannelPlugin): # control attributes - listformat = "srv" + listformat = "href" has_search = True audioformat = "video/youtube" titles = dict( genre="Channel", title="Title", playing="Description", bitrate=False, listeners=False ) srv = conf.peertube_srv image_resize = 48 @@ -171,15 +171,17 @@ "sort": "-publishedAt", "nsfw": "false" } # fetch + map + self.status(0.9) entries = [] for video in self.api("videos", params): #log.DATA(video) entries.append(self.map_data(video)) #log.EN(json.dumps(entries, indent=4)) + self.status(1.0) return entries # peertube entry to streamtunter2 dict def map_data(self, v): url = "http://" + v["channel"]["host"] @@ -198,10 +200,11 @@ # fetch one or multiple pages from API def api(self, method, params={}, debug=False, count=200, **kw): r = [] for i in range(0, 5): + self.status(i / 6.0) add = json.loads( ahttp.get("http://{}/api/v1/{}".format(conf.peertube_srv, method), params, **kw) ) if not add.get("data"): return add Index: channels/radiobrowser.py ================================================================== --- channels/radiobrowser.py +++ channels/radiobrowser.py @@ -103,10 +103,11 @@ "stations/search", {"search": search, "limit": conf.max_streams} ) # topclick, topvote elif cat in self.pricat: + self.status(0.3) data = self.api( "stations/{}/{}".format(cat, conf.max_streams), {"limit": conf.max_streams} ) # empty category @@ -127,10 +128,11 @@ ) #data 
= self.api("stations/" + self.catmap[conf.radiobrowser_cat] + "/" + cat) if len(data) >= 5000: data = data[0:5000] + self.status(0.75) r = [] for e in data: r.append(dict( genre = e["tags"], @@ -166,15 +168,15 @@ # callback for general stream play event def click(self, row, channel): if not channel == self: return # fetch uuid, then register click - uuid = self.api("stations/byurl", {"url": row.get("url")}) + uuid = self.api("stations/byurl", {"url": row.get("url")}, quieter=1) if uuid: if isinstance(uuid, list): # just vote on the first entry uuid = uuid[0] - log.PROC_CLICK_COUNT(self.api("url/{}".format(uuid["stationuuid"]))) + log.CLICK(self.api("url/{}".format(uuid["stationuuid"]), quieter=1)) # Add radio station to RBI def submit(self, *w): cn = self.parent.channel() Index: channels/tunein.py ================================================================== --- channels/tunein.py +++ channels/tunein.py @@ -89,11 +89,14 @@ def api(self, method): r = [] # fetch API page next = self.base + method max = int(conf.radiotime_maxpages) + i = 0.1 while next: + self.status(i / (12.5 + max * 0.7)) + i += 1.1 opml = ahttp.get(next) next = None x = ElementTree.fromstring(opml) # append entries for outline in x.findall(".//outline"): Index: channels/xiph.py ================================================================== --- channels/xiph.py +++ channels/xiph.py @@ -197,15 +197,15 @@ elif cat: url = "http://dir.xiph.org/genres/{}".format(cat.title()) # Collect all result pages html = ahttp.get(url) - for i in range(1,9): + for i in range(1, 9): m = re.search('href="[?]cursor=(\w+)">Next', html) if not m: break - self.status(i/5.1) + self.status(i / 11.1) html += ahttp.get(url, {"cursor": m.group(1)}) try: html = html.encode("raw_unicode_escape").decode("utf-8") except: pass Index: contrib/radiolist.py ================================================================== --- contrib/radiolist.py +++ contrib/radiolist.py @@ -73,10 +73,11 @@ # extract stream urls def 
update_streams(self, cat): entries = [] html = "" for i in range(1, int(int(conf.max_streams)/50)+1): + self.status(i / 11.0) html = html + ahttp.get("http://radiolist.net/genre/{}?paginate={}".format(cat.lower(), i)) if not html.find('?paginate={}">Next'.format(i+1)) >= 0: break for block in re.findall(self.recipe["block"], html, re.S|re.M): #log.HTML(block) Index: contrib/radionet.py ================================================================== --- contrib/radionet.py +++ contrib/radionet.py @@ -88,12 +88,14 @@ # Fetch entries def update_streams(self, cat, search=None): # category page, get key urlcat = cat.replace(" ", "-").lower() + self.status(0.1) html = ahttp.get(self.genre_url.format(urlcat)) for p in range(2, 4): + self.status(p / 5.5) if html.find('?p={}"'.format(p)) >= 0: html += ahttp.get(self.genre_url.format(urlcat) + "?p={}".format(p)) self.set_key(html) r = [] Index: contrib/rcast.py ================================================================== --- contrib/rcast.py +++ contrib/rcast.py @@ -52,10 +52,11 @@ html = "" if search: # pretty much identical (except first page should be /dir/?action=search and POST field) cat = search max_pages = 1 for i in range(1, max_pages + 1): + self.status(i / 12.5) html += ahttp.get("%s/%s/page%s" % (self.base, cat, i)) if not re.search('href="/dir/%s/page%s">Next' % (cat, i + 1), html): break # extract