Index: channels/xiph.py
==================================================================
--- channels/xiph.py
+++ channels/xiph.py
@@ -28,11 +28,12 @@
 #
 # → "JSON cache" retrieves a refurbished JSON station list,
 #   both sliceable genres and searchable.
 #
 # → "Clunky XML" fetches the olden YP.XML, which is really
-#   slow, then slices out genres. No search.
+#   slow, then slices out genres. No search. The secret
+#   "buffy" mode keeps all streams buffered.
 #
 # → "Forbidden Fruits" extracts from dir.xiph.org HTML pages,
 #   with homepages and listener/max infos available. Search
 #   is also possible.
 #
@@ -115,11 +116,10 @@
         #-- extract
         l = []
         data = json.loads(data)
         for e in data:
-            #log.DATA(e)
             if not len(l) or l[-1]["title"] != e["stream_name"]:
                 l.append({
                     "title": e["stream_name"],
                     "url": e["listen_url"],
                     "format": e["type"],
@@ -145,12 +145,14 @@
         # enabling this buffer method prevents partial reloading..
         if conf.xiph_source != "buffy":
             buffy = []
 
         # Get XML blob
-        yp = ahttp.get(self.xml_url, statusmsg="Brace yourselves, still downloading the yp.xml blob.")
-        log.DATA("returned")
+        if not buffy:
+            yp = ahttp.get(self.xml_url, statusmsg="Brace yourselves, still downloading the yp.xml blob.")
+        else:
+            yp = "<entries/>"  # empty stub, so the parse below doesn't choke when the buffer is reused
         self.status("Yes, XML parsing isn't much faster either.", timeout=20)
         for entry in xml.dom.minidom.parseString(yp).getElementsByTagName("entry"):
             buffy.append({
                 "title": x(entry, "server_name"),
                 "url": x(entry, "listen_url"),
@@ -168,17 +170,15 @@
         # Filter out a single subtree
         l = []
         if cat:
             rx = re.compile(cat.lower())
-            l = []
-            for row in buffy:
-                if rx.search(row["genre"]):
-                    l.append(row)
+            l = [row for row in buffy if rx.search(row["genre"])]
 
+        # Search would be no problem to add, but nobody is using the YP.XML mode anyway.
         elif search:
-            pass
+            pass
 
         # Result category
         return l
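
For context on the "buffy" branch above: it keeps the parsed station list in a module-level list that outlives individual calls, so the slow yp.xml download and the minidom parse only run on the first pass. The following is a minimal standalone sketch of that caching pattern; fetch_yp_xml(), the text() helper, the inline sample XML and the use_buffer flag are illustrative stand-ins, not the real ahttp.get()/conf.xiph_source plumbing from channels/xiph.py.

import re
import xml.dom.minidom

buffy = []   # module-level buffer, persists across calls like in xiph.py

def fetch_yp_xml():
    # Stand-in for the slow ahttp.get(self.xml_url, ...) download.
    return """<entries>
      <entry><server_name>Demo</server_name><listen_url>http://example.org/a</listen_url><genre>ambient</genre></entry>
      <entry><server_name>Rock1</server_name><listen_url>http://example.org/b</listen_url><genre>rock</genre></entry>
    </entries>"""

def text(entry, tag):
    # Extract the text content of a child element, "" if absent.
    nodes = entry.getElementsByTagName(tag)
    return nodes[0].firstChild.data if nodes and nodes[0].firstChild else ""

def update_streams(cat, use_buffer=True):
    global buffy
    if not use_buffer:
        buffy = []                       # non-"buffy" mode: always refetch
    if not buffy:                        # cache miss: download and parse once
        dom = xml.dom.minidom.parseString(fetch_yp_xml())
        for entry in dom.getElementsByTagName("entry"):
            buffy.append({
                "title": text(entry, "server_name"),
                "url": text(entry, "listen_url"),
                "genre": text(entry, "genre"),
            })
    rx = re.compile(cat.lower())         # slice out the requested genre
    return [row for row in buffy if rx.search(row["genre"])]

print(update_streams("rock"))            # first call parses the XML
print(update_streams("ambient"))         # second call is served from the buffer

Keeping the buffer trades a bit of memory for responsiveness when switching genres; clearing it (the non-"buffy" default) refetches fresh data on every category change.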