Index: channels/__init__.py
==================================================================
--- channels/__init__.py
+++ channels/__init__.py
@@ -28,11 +28,11 @@
 import gtk
 from uikit import uikit, ver as gtk_ver
 from config import *
-import ahttp as http
+import ahttp
 import action
 import favicon
 import os.path
 import xml.sax.saxutils
 import re
@@ -396,11 +396,11 @@
         if not row.get("homepage"):
             url = self.rx_www_url.search(row.get("title", ""))
             if url:
                 url = url.group(0).lower().replace(" ", "")
                 url = (url if url.find("www.") == 0 else "www."+url)
-                row["homepage"] = http.fix_url(url)
+                row["homepage"] = ahttp.fix_url(url)
         return row


     # reload current stream from web directory

Index: channels/icast.py
==================================================================
--- channels/icast.py
+++ channels/icast.py
@@ -35,14 +35,14 @@
 import re
 import json
 from config import *
 from channels import *
-import ahttp as http
+import ahttp


-# Surfmusik sharing site
+# iCast.io API
 class icast (ChannelPlugin):

     # control attributes
     has_search = True
     listformat = "pls"
@@ -52,11 +52,11 @@

     # Categories require little post-processing, just dict into list conversion
     def update_categories(self):
         self.categories = []
-        for genre,cats in json.loads(http.get(self.base + "genres"))["genres"].items():
+        for genre,cats in json.loads(ahttp.get(self.base + "genres"))["genres"].items():
             self.categories.append(genre.title())
             self.categories.append([c.title() for c in cats])


     # Just copy over stream URLs and station titles
     def update_streams(self, cat, search=None):
@@ -82,15 +82,15 @@

     # fetch multiple pages
     def api(self, method, path, params):
         r = []
         while len(r) < int(conf.max_streams):
-            data = json.loads(http.get( self.base + method + path, params))
+            data = json.loads(ahttp.get( self.base + method + path, params))
             r += data["stations"]
             if len(r) >= data["meta"]["total_count"] or len(data["stations"]) < 10:
                 break
             else:
                 params["page"] = int(data["meta"]["current_page"]) + 1
                 self.parent.status(params["page"] * 9.5 / float(conf.max_streams))
         #log.DATA(data)
         return r

Index: channels/internet_radio.py
==================================================================
--- channels/internet_radio.py
+++ channels/internet_radio.py
@@ -27,11 +27,11 @@

 from channels import *
 import re
 from config import *
-import ahttp as http
+import ahttp
 from pq import pq


@@ -45,11 +45,11 @@

     # load genres
     def update_categories(self):
-        html = http.get(self.base_url)
+        html = ahttp.get(self.base_url)
         rx = re.compile("""="/stations/[-+&.\w\s%]+/">([^<]+)<""")
         cats = rx.findall(html)
         cats = list(set(cats))
         cats = [s.capitalize() for s in cats]
         self.categories = sorted(list(set(cats)))
@@ -69,11 +69,11 @@
         max_pages = max(int(conf.internetradio_max_pages), 1)
         for page in range(1, max_pages):

             # Append HTML source
             html.append(
-                http.get(
+                ahttp.get(
                     self.base_url + "stations/"
                     + cat.lower().replace(" ", "%20")
                     + "/" + ("page"+str(page) if page>1 else "")
                 )
             )
@@ -126,11 +126,11 @@
             # transform data
             r.append({
                 "url": url,
                 "genre": self.strip_tags(genres or ""),
-                "homepage": http.fix_url(homepage or ""),
+                "homepage": ahttp.fix_url(homepage or ""),
                 "title": (title or "").strip().replace("\n", " "),
                 "playing": (playing or "").strip().replace("\n", " "),
                 "bitrate": int(bitrate or 0),
                 "listeners": int(listeners or 0),
                 "format": "audio/mpeg", # there is no stream info on that, but internet-radio.org.uk doesn't seem very ogg-friendly anyway, so we assume the default here
@@ -165,11 +165,11 @@
             else:
                 url = ""
             r.append({
                 "title": dir.find("h4").text(),
-                "homepage": http.fix_url(dir.find("a.small").attr("href")),
+                "homepage": ahttp.fix_url(dir.find("a.small").attr("href")),
                 "url": url,
                 "genre": dir.find("a[href^='/stations/']").text(),
                 "listeners": int(bl[0]),
                 "bitrate": int(bl[1]),
                 "format": "audio/mpeg",

Index: channels/itunes.py
==================================================================
--- channels/itunes.py
+++ channels/itunes.py
@@ -32,11 +32,11 @@
 #

 import re
 from config import *
 from channels import *
-import ahttp as http
+import ahttp


 # Surfmusik sharing site
 class itunes (ChannelPlugin):
@@ -82,11 +82,11 @@
         pass


     # Just copy over stream URLs and station titles
     def update_streams(self, cat):
-        m3u = http.get(self.base, {"category": cat.lower()})
+        m3u = ahttp.get(self.base, {"category": cat.lower()})
         if len(m3u) < 256:
             log.ERR(m3u)

         rx_m3u = re.compile(r"""
             ^File(\d+)\s*=\s*(http://[^\s]+)\s*$\s*

Index: channels/jamendo.py
==================================================================
--- channels/jamendo.py
+++ channels/jamendo.py
@@ -35,11 +35,11 @@
 # module to extract the JAMJson format into pls/m3u/xspf. (The previous v2 API
 # retrieval is going to become inaccessible soon.)

 import re
-import ahttp as http
+import ahttp
 from config import *
 from channels import *
 import json
@@ -377,11 +377,11 @@
             "offset": 0,
             "limit": 200,
           }.items()) + list(params.items())
         )
         while (params["offset"] < max) and (len(r) % 200 == 0):
-            data = http.get(self.api_base + method, params)
+            data = ahttp.get(self.api_base + method, params)
             data = json.loads(data)
             if data:
                 r += data["results"]
             else:
                 return r

Index: channels/live365.py
==================================================================
--- channels/live365.py
+++ channels/live365.py
@@ -20,15 +20,14 @@
 #  entries and require a logon. This plugins tries to filter
 #  thoise out.

 # streamtuner2 modules
-from config import conf
+from config import *
 from uikit import uikit
-import ahttp as http
+import ahttp
 from channels import *
-from config import *
 import action

 # python modules
 import re
 import xml.dom.minidom
@@ -88,11 +87,11 @@
         # Retrieve genere index pages
         html = ""
         for i in [1, 17, 33, 49]:
             url = "http://www.live365.com/cgi-bin/directory.cgi?first=%i&site=web&mode=3&genre=%s&charset=UTF-8&target=content" % (i, cat.lower())
-            html += http.get(url, feedback=self.parent.status)
+            html += ahttp.get(url, feedback=self.parent.status)

         # Extract from JavaScript
         rx = re.compile(r"""
             stn.set\( " (\w+) ", \s+ " ((?:[^"\\]+|\\.)*) "\); \s+
         """, re.X|re.I|re.S|re.M)

Index: channels/modarchive.py
==================================================================
--- channels/modarchive.py
+++ channels/modarchive.py
@@ -25,11 +25,11 @@
 #  most reliably. See the help on how to define wget/curl to download
 #  them as well.

 import re
-import ahttp as http
+import ahttp
 from config import conf
 from channels import *
 from config import *
@@ -54,11 +54,11 @@

     # refresh category list
     def update_categories(self):
-        html = http.get("http://modarchive.org/index.php?request=view_genres")
+        html = ahttp.get("http://modarchive.org/index.php?request=view_genres")
         rx_current = re.compile(r"""
            >\s+(\w[^<>]+)\s+ |
            <a[^>]+query=(\d+)&[^>]+>(\w[^<]+)
         """, re.S|re.X)
@@ -86,11 +86,11 @@

     # download links from dmoz listing
     def update_streams(self, cat):
         url = "http://modarchive.org/index.php"
         params = dict(query=self.catmap[cat], request="search", search_type="genre")
-        html = http.get(url, params)
+        html = ahttp.get(url, params)
         entries = []

         rx_mod = re.compile("""
             href="(http://api\.modarchive\.org/downloads\.php[?]moduleid=(\d+)[#][^"]+)"
             .*? /formats/(\w+)\.png"

Index: channels/myoggradio.py
==================================================================
--- channels/myoggradio.py
+++ channels/myoggradio.py
@@ -29,11 +29,11 @@
 from channels import *
 from config import *
 import action
 from uikit import uikit
-import ahttp as http
+import ahttp

 import re
 import json
 from compat2and3 import StringIO
 import copy
@@ -78,16 +78,16 @@
         entries = []

         # common
         if (cat == "common"):
             # fetch
-            data = http.get(self.api + "common.json")
+            data = ahttp.get(self.api + "common.json")
             entries = json.load(StringIO(data))

         # bookmarks
         elif (cat == "personal") and self.user_pw():
-            data = http.get(self.api + "favoriten.json?user=" + self.user_pw()[0])
+            data = ahttp.get(self.api + "favoriten.json?user=" + self.user_pw()[0])
             entries = json.load(StringIO(data))

         # unknown
         else:
             self.parent.status("Unknown category")
@@ -111,11 +111,11 @@
         if row:
             row = copy.copy(row)

             # convert PLS/M3U link to direct ICY stream url
             if conf.myoggradio_morph and self.parent.channel().listformat != "url/direct":
-                row["url"] = http.fix_url(action.srv(row["url"]))
+                row["url"] = ahttp.fix_url(action.srv(row["url"]))

             # prevent double check-ins
             if row["title"] in (r.get("title") for r in self.streams["common"]):
                 pass
             elif row["url"] in (r.get("url") for r in self.streams["common"]):
@@ -154,23 +154,23 @@
         }

         # just push data in, like the form does
         if form:
             self.login()
-            http.get(self.api + "c_neu.jsp", params=submit, ajax=1, post=1)
+            ahttp.get(self.api + "c_neu.jsp", params=submit, ajax=1, post=1)

         # use JSON interface
         else:
-            http.get(self.api + "commonadd.json", params=submit, ajax=1)
+            ahttp.get(self.api + "commonadd.json", params=submit, ajax=1)


     # authenticate against MyOggRadio
     def login(self):
         login = self.user_pw()
         if login:
             data = dict(zip(["benutzer", "passwort"], login))
-            http.get(self.api + "c_login.jsp", params=data, ajax=1)
+            ahttp.get(self.api + "c_login.jsp", params=data, ajax=1)
             # let's hope the JSESSIONID cookie is kept


     # returns login (user,pw)
     def user_pw(self):

Index: channels/punkcast.py
==================================================================
--- channels/punkcast.py
+++ channels/punkcast.py
@@ -22,11 +22,11 @@
 #  historic reasons. It was one of the default streamtuner1
 #  channels.

 import re
-import ahttp as http
+import ahttp
 from config import conf
 import action
 from channels import *
 from config import *
@@ -54,11 +54,11 @@
         """, re.S|re.X)
         entries = []

         #-- all from frontpage
-        html = http.get("http://www.punkcast.com/")
+        html = ahttp.get("http://www.punkcast.com/")
         for uu in rx_link.findall(html):
             (homepage, id, title) = uu
             entries.append({
                 "genre": "%s" % id,
                 "title": title,
@@ -75,11 +75,11 @@

     # special handler for play
     def play(self, row):
         rx_sound = re.compile("""(http://[^"<>]+[.](mp3|ogg|m3u|pls|ram))""")
-        html = http.get(row["homepage"])
+        html = ahttp.get(row["homepage"])

         # look up ANY audio url
         for uu in rx_sound.findall(html):
             log.DATA( uu )
             (url, fmt) = uu

Index: channels/radiobrowser.py
==================================================================
--- channels/radiobrowser.py
+++ channels/radiobrowser.py
@@ -35,11 +35,11 @@
 import re
 import json
 from config import *
 from channels import *
-import ahttp as http
+import ahttp

 # API endpoints:
 # http://www.radio-browser.info/webservice/json/countries
 # http://www.radio-browser.info/webservice/json/languages
@@ -110,11 +110,11 @@
         return r

     # fetch multiple pages
     def api(self, method, params={}):
-        j = http.get(self.base + method, params)
+        j = ahttp.get(self.base + method, params)
         try:
             return json.loads(j, strict=False)  # some entries contain invalid character encodings
         except:
             return []

Index: channels/shoutcast.py
==================================================================
--- channels/shoutcast.py
+++ channels/shoutcast.py
@@ -21,11 +21,11 @@
 #
 # It has been aquired by Radionomy in 2014, since then significant changes
 # took place. The former yellow pages API got deprecated.

-import ahttp as http
+import ahttp
 from json import loads as json_decode
 import re
 from config import *
 from channels import *
 from pq import pq
@@ -64,11 +64,11 @@

     # Extracts the category list from www.shoutcast.com,
     # stores a catmap (title => id)
     def update_categories(self):
-        html = http.get(self.base_url)
+        html = ahttp.get(self.base_url)
         #log.DATA( html )
         self.categories = []

         # Genre list in sidebar
         """
           • Adult
         """
@@ -101,11 +101,11 @@
         # page
         url = "http://www.shoutcast.com/Home/BrowseByGenre"
         params = { "genrename": cat }
         referer = None
         try:
-            json = http.get(url, params=params, referer=referer, post=1, ajax=1)
+            json = ahttp.get(url, params=params, referer=referer, post=1, ajax=1)
             json = json_decode(json)
         except:
             log.ERR("HTTP request or JSON decoding failed. Outdated python/requests perhaps.")
             return []
         self.parent.status(0.75)

Index: channels/surfmusik.py
==================================================================
--- channels/surfmusik.py
+++ channels/surfmusik.py
@@ -31,11 +31,11 @@
 #
 #
 #

 import re
-import ahttp as http
+import ahttp
 from config import *
 from channels import *
@@ -94,11 +94,11 @@
         r = []
         # Add main categories, and fetch subentries (genres or country names)
         for cat in cats[lang]:
             r.append(cat)
             if map.get(cat):
-                subcats = rx_links.findall( http.get(base_url + map[cat]) )
+                subcats = rx_links.findall( ahttp.get(base_url + map[cat]) )
                 subcats = [x.replace("+", " ").title() for x in subcats]
                 r.append(sorted(subcats))
         self.categories = r
@@ -129,11 +129,11 @@
             else:
                 path = path_country
         if path is not None:
             ucat = cat.replace(" ", "+").lower()
-            html = http.get(base_url + path + ucat + ".html")
+            html = ahttp.get(base_url + path + ucat + ".html")
             html = re.sub("&#x?\d+;", "", html)

         rx_radio = re.compile(r"""
            <a[^>]*\s+href="(.+?)"[^>]*> .*?
            ]*>([^<>]+)
@@ -148,11 +148,11 @@
             for uu in rx_radio.findall(html):
                 (url, homepage, name, genre, city) = uu

                 # find mms:// for webtv stations
                 if is_tv:
-                    m = rx_video.search(http.get(url))
+                    m = rx_video.search(ahttp.get(url))
                     if m:
                         url = m.group(1)
                 # just convert /radio/ into /m3u/ link
                 else:
                     url = "http://www.surfmusik.de/m3u/" + url[30:-5] + ".m3u"

Index: channels/tunein.py
==================================================================
--- channels/tunein.py
+++ channels/tunein.py
@@ -26,11 +26,11 @@
 import re
 import json
 from config import *
 from channels import *
-import ahttp as http
+import ahttp
 from xml.etree import ElementTree


 # TuneIn radio directory
 class tunein (ChannelPlugin):
@@ -84,12 +84,12 @@

     # Fetch OPML, convert outline elements to dicts
     def api(self, method):
         r = []
-        opml = http.get(self.base + method)
+        opml = ahttp.get(self.base + method)
         x = ElementTree.fromstring(opml)
         for outline in x.findall(".//outline"):
             r.append(dict(outline.items()))
         return r

Index: channels/xiph.py
==================================================================
--- channels/xiph.py
+++ channels/xiph.py
@@ -27,11 +27,11 @@
 #

 from config import *
 from uikit import uikit
-import ahttp as http
+import ahttp
 from channels import *
 #from xml.sax.saxutils import unescape as entity_decode, escape as xmlentities
 #import xml.dom.minidom
 import json
 import re
@@ -102,11 +102,11 @@
             params["cat"] = cat.lower()
         if search:
             params["search"] = search

         #-- get data
-        data = http.get(self.json_url, params=params)
+        data = ahttp.get(self.json_url, params=params)
         #log.DATA(data)

         #-- extract
         l = []
         log.PROC( "processing api.dir.xiph.org JSON (via api.include-once.org cache)" )
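
Every hunk above applies the same two-part rename: the aliased "import ahttp as http" becomes a plain "import ahttp", and each call site is requalified from http.* to ahttp.*. A minimal sketch of the resulting call pattern, modeled on the radiobrowser.api() hunk; the ExamplePlugin class name is hypothetical, and only the ahttp.get(url, params) form actually exercised in this diff is assumed:

    import json
    import ahttp  # module now used under its real name, no "as http" alias

    class ExamplePlugin:  # hypothetical stand-in for a ChannelPlugin subclass
        base = "http://www.radio-browser.info/webservice/json/"

        def api(self, method, params={}):
            # fetch one JSON endpoint, as radiobrowser.api() does after this patch
            j = ahttp.get(self.base + method, params)
            try:
                return json.loads(j, strict=False)
            except ValueError:
                return []

The other keyword arguments seen in the hunks (post=1, ajax=1, referer=..., feedback=...) pass through ahttp.get() unchanged; only the module name differs.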