Index: channels/internet_radio_org_uk.py ================================================================== --- channels/internet_radio_org_uk.py +++ channels/internet_radio_org_uk.py @@ -82,17 +82,15 @@ (?:
(\d+)\s*Listeners)? """, re.S|re.X) #rx_homepage = re.compile('href="(http://[^"]+)"[^>]+target="_blank"') rx_pages = re.compile('href="/stations/[-+\w%\d\s]+/page(\d+)">\d+') rx_numbers = re.compile("(\d+)") - self.parent.status("downloading category pages...") # multiple pages + max = int(conf.internetradio_max_pages); max = max if max > 1 else 1 page = 1 - max = int(conf.internetradio_max_pages) - max = (max if max > 1 else 1) while page <= max: # fetch html = http.get(self.homepage + "stations/" + cat.lower().replace(" ", "%20") + "/" + ("page"+str(page) if page>1 else "")) Index: channels/jamendo.py ================================================================== --- channels/jamendo.py +++ channels/jamendo.py @@ -136,10 +136,11 @@ "homepage": e["shareurl"], #"url": "http://api.jamendo.com/v3.0/playlists/file?client_id=%s&id=%s" % (self.cid, e["id"]), "url": "http://api.jamendo.com/get2/stream/track/xspf/?playlist_id=%s&n=all&order=random&from=app-%s" % (e["id"], self.cid), "format": "application/xspf+xml", }) + self.parent.status(float(offset)/float(1000)) # albums elif cat in ["albums", "newest"]: for offset in self.retrieval_offsets(): data = http.get(self.api + "albums/musicinfo", params = { @@ -160,10 +161,11 @@ "homepage": e["shareurl"], #"url": "http://api.jamendo.com/v3.0/playlists/file?client_id=%s&id=%s" % (self.cid, e["id"]), "url": "http://api.jamendo.com/get2/stream/track/xspf/?album_id=%s&streamencoding=ogg2&n=all&from=app-%s" % (e["id"], self.cid), "format": "application/xspf+xml", }) + self.parent.status(float(offset)/float(1000)) # genre list else: for offset in self.retrieval_offsets(): data = http.get(self.api + "tracks", params={ @@ -188,10 +190,11 @@ "homepage": e["shareurl"], #"url": e["audio"], "url": "http://storage-new.newjamendo.com/?trackid=%s&format=ogg2&u=0&from=app-%s" % (e["id"], self.cid), "format": fmt, }) + self.parent.status(float(offset)/float(1000)) # done return entries Index: channels/live365.py 
================================================================== --- channels/live365.py +++ channels/live365.py @@ -120,14 +120,14 @@ >DrawRatingStars\((\d+),\s+(\d+),.*? """, re.X|re.I|re.S|re.M) # src="(http://www.live365.com/.+?/stationlogo\w+.jpg)".+? # append entries to result list - __print__( dbg.DATA, html ) + #__print__( dbg.DATA, html ) ls = [] for row in rx.findall(html): - __print__( dbg.DATA, row ) + #__print__( dbg.DATA, row ) points = int(row[8]) count = int(row[9]) ls.append({ "launch_id": row[0], "sofo": row[0], # subscribe-or-fuck-off status flags Index: channels/modarchive.py ================================================================== --- channels/modarchive.py +++ channels/modarchive.py @@ -113,11 +113,11 @@ .*? >Rated\s*(\d+) """, re.X|re.S) for uu in rx_mod.findall(html): (url, id, fmt, title, file, rating) = uu - __print__( dbg.DATA, uu ) + #__print__( dbg.DATA, uu ) entries.append({ "genre": cat, "url": url, "id": id, "format": self.mime_fmt(fmt) + "+zip", Index: channels/shoutcast.py ================================================================== --- channels/shoutcast.py +++ channels/shoutcast.py @@ -115,11 +115,10 @@ #/radiolist.cfm?action=sub&string=&cat=Oldies&_cf_containerId=radiolist&_cf_nodebug=true&_cf_nocache=true&_cf_rc=0 #/radiolist.cfm?start=19&action=sub&string=&cat=Oldies&amount=18&order=listeners # page url = "http://www.shoutcast.com/radiolist.cfm?action=sub&string=&cat="+ucat+"&order=listeners&amount="+str(count) - __print__(dbg.HTTP, url) referer = "http://www.shoutcast.com/?action=sub&cat="+ucat params = {} html = http.get(url, params=params, referer=referer, ajax=1) #__print__(dbg.DATA, html) @@ -198,11 +197,10 @@ "genre": cat, }) # display partial results (not strictly needed anymore, because we fetch just one page) - self.parent.status() self.update_streams_partially_done(entries) # more pages to load? 
next = 99999 @@ -209,9 +207,9 @@ except Exception as e: __print__(dbg.ERR, e) return entries #fin - __print__(dbg.DATA, entries) + #__print__(dbg.DATA, entries) return entries Index: channels/surfmusik.py ================================================================== --- channels/surfmusik.py +++ channels/surfmusik.py @@ -123,11 +123,13 @@ }) # limit result list if i > max: break + if i % 10 == 0: + self.parent.status(float(i)/float(max+5)) i += 1 # done return entries Index: channels/xiph.py ================================================================== --- channels/xiph.py +++ channels/xiph.py @@ -102,11 +102,11 @@ #-- extract l = [] __print__( dbg.PROC, "processing api.dir.xiph.org JSON (via api.include-once.org cache)" ) data = json.loads(data) for e in data.values(): - __print__(dbg.DATA, e) + #__print__(dbg.DATA, e) bitrate = int(e["bitrate"]) if conf.xiph_min_bitrate and bitrate and bitrate >= int(conf.xiph_min_bitrate): l.append({ "title": e["stream_name"], "url": e["listen_url"], Index: channels/youtube.py ================================================================== --- channels/youtube.py +++ channels/youtube.py @@ -315,11 +315,11 @@ return data # API version 2.0s jsonified XML needs different unpacking: def wrap2(self, row): - __print__(dbg.DATA, row) + #__print__(dbg.DATA, row) return dict( genre = row["category"][1]["term"], title = row["title"]["$t"], playing = row["author"][0]["name"]["$t"], format = self.fmt,