Check-in [493be36226]
Comment: Exchanged all "import ahttp as http" for plain `ahttp` module usage.
SHA1: 493be362260d1bbc184604e10c0732ef
User & Date: mario on 2015-04-29 22:55:24
Timeline context, 2015-04-29:

22:59  Undo itertools.chain(), use plain list appending.  check-in: 051366205f  user: mario  tags: trunk
22:55  Exchanged all "import ahttp as http" for plain `ahttp` module usage.  check-in: 493be36226  user: mario  tags: trunk
22:54  PublicRadioFan directory plugin. (Works ok, but fetching is somewhat slow.)  check-in: 747be98229  user: mario  tags: trunk
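
The fourteen file diffs below all follow the same pattern: the aliased import is dropped and each call site references the `ahttp` module directly. A minimal before/after sketch of that pattern (the URL is illustrative, and the old-side `http.get()` form is inferred from the check-in comment rather than quoted verbatim from the diffs):

    # before this check-in: module imported under an alias
    import ahttp as http
    html = http.get("http://example.org/genres")      # call goes through the "http" alias

    # after this check-in: plain module usage
    import ahttp
    html = ahttp.get("http://example.org/genres")     # same helper, no alias indirection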
Modified channels/__init__.py from [162b9d0f3b] to [3d68982aa3].
︙

 # GtkBuilder description. They derive from ChannelPlugins therefore,
 # which constructs and registers the required gtk widgets manually.
 import gtk
 from uikit import uikit, ver as gtk_ver
 from config import *
-import ahttp as http
+import ahttp
 import action
 import favicon
 import os.path
 import xml.sax.saxutils
 import re
 import copy
 import inspect

︙

         # deduce homepage URLs from title
         # by looking for www.xyz.com domain names
         if not row.get("homepage"):
             url = self.rx_www_url.search(row.get("title", ""))
             if url:
                 url = url.group(0).lower().replace(" ", "")
                 url = (url if url.find("www.") == 0 else "www."+url)
-                row["homepage"] = http.fix_url(url)
+                row["homepage"] = ahttp.fix_url(url)
         return row

     # reload current stream from web directory
     def reload(self):
         self.load(self.current, force=1)

︙
Modified channels/icast.py from [84d1b6b174] to [2c4e5b4f8f].
︙

 # collect 200 station entries (see main options).
 import re
 import json
 from config import *
 from channels import *
-import ahttp as http
+import ahttp

 # iCast.io API
 class icast (ChannelPlugin):

     # control attributes
     has_search = True
     listformat = "pls"
     titles = dict(listeners=False, bitrate=False, playing=False)
     categories = []
     base = "http://api.icast.io/1/"

     # Categories require little post-processing, just dict into list conversion
     def update_categories(self):
         self.categories = []
-        for genre,cats in json.loads(http.get(self.base + "genres"))["genres"].items():
+        for genre,cats in json.loads(ahttp.get(self.base + "genres"))["genres"].items():
             self.categories.append(genre.title())
             self.categories.append([c.title() for c in cats])

     # Just copy over stream URLs and station titles
     def update_streams(self, cat, search=None):
         if cat:

︙

         return r

     # fetch multiple pages
     def api(self, method, path, params):
         r = []
         while len(r) < int(conf.max_streams):
-            data = json.loads(http.get( self.base + method + path, params))
+            data = json.loads(ahttp.get( self.base + method + path, params))
             r += data["stations"]
             if len(r) >= data["meta"]["total_count"] or len(data["stations"]) < 10:
                 break
             else:
                 params["page"] = int(data["meta"]["current_page"]) + 1
                 self.parent.status(params["page"] * 9.5 / float(conf.max_streams))
         #log.DATA(data)
         return r
Modified channels/internet_radio.py from [7d08db49a9] to [738c122eb2].
︙

 # setting, rather than the global max_streams option.
 #
 from channels import *
 import re
 from config import *
-import ahttp as http
+import ahttp
 from pq import pq

 # streams and gui
 class internet_radio (ChannelPlugin):

     # control data
     listformat = "pls"
     categories = []
     base_url = "https://www.internet-radio.com/"

     # load genres
     def update_categories(self):
-        html = http.get(self.base_url)
+        html = ahttp.get(self.base_url)
         rx = re.compile("""="/stations/[-+&.\w\s%]+/">([^<]+)<""")
         cats = rx.findall(html)
         cats = list(set(cats))
         cats = [s.capitalize() for s in cats]
         self.categories = sorted(list(set(cats)))

︙

         # Fetch multiple pages at once
         html = []
         max_pages = max(int(conf.internetradio_max_pages), 1)
         for page in range(1, max_pages):

             # Append HTML source
             html.append(
-                http.get(
+                ahttp.get(
                     self.base_url + "stations/"
                     + cat.lower().replace(" ", "%20")
                     + "/" + ("page"+str(page) if page>1 else "")
                 )
             )

             # Is there a next page?

︙

             if uu:
                 (url, title, playing, homepage, genres, listeners, bitrate) = uu.groups()

                 # transform data
                 r.append({
                     "url": url,
                     "genre": self.strip_tags(genres or ""),
-                    "homepage": http.fix_url(homepage or ""),
+                    "homepage": ahttp.fix_url(homepage or ""),
                     "title": (title or "").strip().replace("\n", " "),
                     "playing": (playing or "").strip().replace("\n", " "),
                     "bitrate": int(bitrate or 0),
                     "listeners": int(listeners or 0),
                     "format": "audio/mpeg", # there is no stream info on that, but internet-radio.org.uk doesn't seem very ogg-friendly anyway, so we assume the default here
                 })
             else:

︙

                 else:
                     url = ""
             else:
                 url = ""

             r.append({
                 "title": dir.find("h4").text(),
-                "homepage": http.fix_url(dir.find("a.small").attr("href")),
+                "homepage": ahttp.fix_url(dir.find("a.small").attr("href")),
                 "url": url,
                 "genre": dir.find("a[href^='/stations/']").text(),
                 "listeners": int(bl[0]),
                 "bitrate": int(bl[1]),
                 "format": "audio/mpeg",
                 "playing": dir.find("b").text(),
             })

         return r
Modified channels/itunes.py from [43771b8835] to [7f3b7c5cee].
︙

 #
 # In this module only iTunes will be queried for now.
 #
 import re
 from config import *
 from channels import *
-import ahttp as http
+import ahttp

 # Surfmusik sharing site
 class itunes (ChannelPlugin):

     # control attribues
     has_search = False

︙

     # static list for iTunes
     def update_categories(self):
         pass

     # Just copy over stream URLs and station titles
     def update_streams(self, cat):
-        m3u = http.get(self.base, {"category": cat.lower()})
+        m3u = ahttp.get(self.base, {"category": cat.lower()})
         if len(m3u) < 256:
             log.ERR(m3u)
         rx_m3u = re.compile(r"""
             ^File(\d+)\s*=\s*(http://[^\s]+)\s*$\s*
             ^Title\1\s*=\s*([^\r\n]+)\s*$\s*
         """, re.M|re.I|re.X)

︙
Modified channels/jamendo.py from [e32838ce56] to [7a2d2e43d6].
︙

 # Per default Ogg Vorbis is used as streaming format. Track URLs can be played
 # back directly. Playlists and albums now require a roundtrip over the action
 # module to extract the JAMJson format into pls/m3u/xspf. (The previous v2 API
 # retrieval is going to become inaccessible soon.)
 import re
-import ahttp as http
+import ahttp
 from config import *
 from channels import *
 import json

 # jamendo CC music sharing site
 #

︙

             "audioformat": "mp32",
             "imagesize": conf.jamendo_image_size,
             "offset": 0,
             "limit": 200,
         }.items()) + list(params.items()) )
         while (params["offset"] < max) and (len(r) % 200 == 0):
-            data = http.get(self.api_base + method, params)
+            data = ahttp.get(self.api_base + method, params)
             data = json.loads(data)
             if data:
                 r += data["results"]
             else:
                 return r
             params["offset"] += 200;
             self.parent.status(float(params["offset"])/float(max+17))

︙
Modified channels/live365.py from [43a955e299] to [73c3b74f30].
︙

 #
 # Live365 lists around 5000 radio stations. Some are paid
 # entries and require a logon. This plugins tries to filter
 # thoise out.

 # streamtuner2 modules
 from config import *
 from uikit import uikit
-import ahttp as http
+import ahttp
 from channels import *
 import action

 # python modules
 import re
 import xml.dom.minidom
 from xml.sax.saxutils import unescape as entity_decode, escape as xmlentities
 import gtk

︙

     # extract stream infos
     def update_streams(self, cat):

         # Retrieve genere index pages
         html = ""
         for i in [1, 17, 33, 49]:
             url = "http://www.live365.com/cgi-bin/directory.cgi?first=%i&site=web&mode=3&genre=%s&charset=UTF-8&target=content" % (i, cat.lower())
-            html += http.get(url, feedback=self.parent.status)
+            html += ahttp.get(url, feedback=self.parent.status)

         # Extract from JavaScript
         rx = re.compile(r"""
             stn.set\( " (\w+) ", \s+ " ((?:[^"\\]+|\\.)*) "\); \s+
         """, re.X|re.I|re.S|re.M)

         # Group entries before adding them

︙
Modified channels/modarchive.py from [e1c513f5b2] to [932f509dd8].
︙

 #
 # Configuring VLC for `audio/mod+zip` or just a generic `*/*` works
 # most reliably. See the help on how to define wget/curl to download
 # them as well.
 import re
-import ahttp as http
+import ahttp
 from config import conf
 from channels import *
 from config import *

 # The MOD Archive
 #

︙

     catmap = {"Chiptune": "54", "Electronic - Ambient": "2", "Electronic - Other": "100", "Rock (general)": "13", "Trance - Hard": "64", "Swing": "75", "Rock - Soft": "15", "R & B": "26", "Big Band": "74", "Ska": "24", "Electronic - Rave": "65", "Electronic - Progressive": "11", "Piano": "59", "Comedy": "45", "Christmas": "72", "Chillout": "106", "Reggae": "27", "Electronic - Industrial": "34", "Grunge": "103", "Medieval": "28", "Demo Style": "55", "Orchestral": "50", "Soundtrack": "43", "Electronic - Jungle": "60", "Fusion": "102", "Electronic - IDM": "99", "Ballad": "56", "Country": "18", "World": "42", "Jazz - Modern": "31", "Video Game": "8", "Funk": "32", "Electronic - Drum & Bass": "6", "Alternative": "48", "Electronic - Minimal": "101", "Electronic - Gabber": "40", "Vocal Montage": "76", "Metal (general)": "36", "Electronic - Breakbeat": "9", "Soul": "25", "Electronic (general)": "1", "Punk": "35", "Pop - Synth": "61", "Electronic - Dance": "3", "Pop (general)": "12", "Trance - Progressive": "85", "Trance (general)": "71", "Disco": "58", "Electronic - House": "10", "Experimental": "46", "Trance - Goa": "66", "Rock - Hard": "14", "Trance - Dream": "67", "Spiritual": "47", "Metal - Extreme": "37", "Jazz (general)": "29", "Trance - Tribal": "70", "Classical": "20", "Hip-Hop": "22", "Bluegrass": "105", "Halloween": "82", "Jazz - Acid": "30", "Easy Listening": "107", "New Age": "44", "Fantasy": "52", "Blues": "19", "Other": "41", "Trance - Acid": "63", "Gothic": "38", "Electronic - Hardcore": "39", "One Hour Compo": "53", "Pop - Soft": "62", "Electronic - Techno": "7", "Religious": "49", "Folk": "21"}
     categories = []

     # refresh category list
     def update_categories(self):
-        html = http.get("http://modarchive.org/index.php?request=view_genres")
+        html = ahttp.get("http://modarchive.org/index.php?request=view_genres")
         rx_current = re.compile(r"""
             >\s+(\w[^<>]+)\s+</h1> | <a\s[^>]+query=(\d+)&[^>]+>(\w[^<]+)</a>
         """, re.S|re.X)

︙

     # download links from dmoz listing
     def update_streams(self, cat):
         url = "http://modarchive.org/index.php"
         params = dict(query=self.catmap[cat], request="search", search_type="genre")
-        html = http.get(url, params)
+        html = ahttp.get(url, params)
         entries = []
         rx_mod = re.compile("""
             href="(http://api\.modarchive\.org/downloads\.php[?]moduleid=(\d+)[#][^"]+)"
             .*? /formats/(\w+)\.png"
             .*? title="([^">]+)">([^<>]+)</a>
             .*? >(?:Rated|Unrated)</a>\s*(\d*)

︙
Modified channels/myoggradio.py from [cce93c4126] to [cb87a3782b].
︙

 #
 from channels import *
 from config import *
 import action
 from uikit import uikit
-import ahttp as http
+import ahttp
 import re
 import json
 from compat2and3 import StringIO
 import copy
 from uikit import gtk

︙

         # result list
         entries = []

         # common
         if (cat == "common"):
             # fetch
-            data = http.get(self.api + "common.json")
+            data = ahttp.get(self.api + "common.json")
             entries = json.load(StringIO(data))

         # bookmarks
         elif (cat == "personal") and self.user_pw():
-            data = http.get(self.api + "favoriten.json?user=" + self.user_pw()[0])
+            data = ahttp.get(self.api + "favoriten.json?user=" + self.user_pw()[0])
             entries = json.load(StringIO(data))

         # unknown
         else:
             self.parent.status("Unknown category")
             pass

︙

         # get data
         row = self.parent.row()
         if row:
             row = copy.copy(row)

             # convert PLS/M3U link to direct ICY stream url
             if conf.myoggradio_morph and self.parent.channel().listformat != "url/direct":
-                row["url"] = http.fix_url(action.srv(row["url"]))
+                row["url"] = ahttp.fix_url(action.srv(row["url"]))

             # prevent double check-ins
             if row["title"] in (r.get("title") for r in self.streams["common"]):
                 pass
             elif row["url"] in (r.get("url") for r in self.streams["common"]):
                 pass

︙

             "typ": e["format"][6:],
             "eintragen": "eintragen", # form
         }

         # just push data in, like the form does
         if form:
             self.login()
-            http.get(self.api + "c_neu.jsp", params=submit, ajax=1, post=1)
+            ahttp.get(self.api + "c_neu.jsp", params=submit, ajax=1, post=1)
         # use JSON interface
         else:
-            http.get(self.api + "commonadd.json", params=submit, ajax=1)
+            ahttp.get(self.api + "commonadd.json", params=submit, ajax=1)

     # authenticate against MyOggRadio
     def login(self):
         login = self.user_pw()
         if login:
             data = dict(zip(["benutzer", "passwort"], login))
-            http.get(self.api + "c_login.jsp", params=data, ajax=1)
+            ahttp.get(self.api + "c_login.jsp", params=data, ajax=1)
             # let's hope the JSESSIONID cookie is kept

     # returns login (user,pw)
     def user_pw(self):
         if len(conf.myoggradio_login) and conf.myoggradio_login != "user:password":
             return conf.myoggradio_login.split(":")

︙
Modified channels/punkcast.py from [2ac5815def] to [f6c4524b4e].
︙

 #
 # Punkcast is no longer updated. This plugin is kept for
 # historic reasons. It was one of the default streamtuner1
 # channels.
 import re
-import ahttp as http
+import ahttp
 from config import conf
 import action
 from channels import *
 from config import *

 # basic.ch broadcast archive

︙

             <a\shref="(http://punkcast.com/(\d+)/index.html)">
             .*? ALT="([^<">]+)"
         """, re.S|re.X)
         entries = []

         #-- all from frontpage
-        html = http.get("http://www.punkcast.com/")
+        html = ahttp.get("http://www.punkcast.com/")
         for uu in rx_link.findall(html):
             (homepage, id, title) = uu
             entries.append({
                 "genre": "%s" % id,
                 "title": title,
                 "playing": "PUNKCAST #%s" % id,
                 "format": "audio/mpeg",
                 "url": "none:",
                 "homepage": homepage,
                 "img": "http://punkcast.com/%s/PUNK%s.jpg" % (id, id) if conf.punkcast_img else None,
             })

         # done
         return entries

     # special handler for play
     def play(self, row):
         rx_sound = re.compile("""(http://[^"<>]+[.](mp3|ogg|m3u|pls|ram))""")
-        html = http.get(row["homepage"])
+        html = ahttp.get(row["homepage"])

         # look up ANY audio url
         for uu in rx_sound.findall(html):
             log.DATA( uu )
             (url, fmt) = uu
             action.play(url, self.mime_fmt(fmt), "srv")
             return

         # or just open webpage
         action.browser(row["homepage"])
Modified channels/radiobrowser.py from [11cf6e90ac] to [0a40d436ac].
︙

 # Also has a neat JSON API, so is quite easy to support.
 import re
 import json
 from config import *
 from channels import *
-import ahttp as http
+import ahttp

 # API endpoints:
 # http://www.radio-browser.info/webservice/json/countries
 # http://www.radio-browser.info/webservice/json/languages
 # http://www.radio-browser.info/webservice/json/tags
 # http://www.radio-browser.info/webservice/json/stations/topclick

︙

                 bitrate = - int(e["negativevotes"]),
             ))
         return r

     # fetch multiple pages
     def api(self, method, params={}):
-        j = http.get(self.base + method, params)
+        j = ahttp.get(self.base + method, params)
         try:
             return json.loads(j, strict=False)   # some entries contain invalid character encodings
         except:
             return []
Modified channels/shoutcast.py from [7ab0770356] to [fe5a39c8bf].
︙

 # Shoutcast is a server software for audio streaming. It automatically spools
 # station information on shoutcast.com
 #
 # It has been aquired by Radionomy in 2014, since then significant changes
 # took place. The former yellow pages API got deprecated.
-import ahttp as http
+import ahttp
 from json import loads as json_decode
 import re
 from config import *
 from channels import *
 from pq import pq
 import channels
 from compat2and3 import urllib

︙

     # redefine
     streams = {}

     # Extracts the category list from www.shoutcast.com,
     # stores a catmap (title => id)
     def update_categories(self):
-        html = http.get(self.base_url)
+        html = ahttp.get(self.base_url)
         #log.DATA( html )
         self.categories = []

         # Genre list in sidebar
         """ <li><a id="genre-90" href="/Genre?name=Adult" onclick="loadStationsByGenre('Adult', 90, 89); return false;">Adult</a></li> """
         rx = re.compile(r"loadStationsByGenre\( '([^']+)' [,\s]* (\d+) [,\s]* (\d+) \)", re.X)
         subs = rx.findall(html)

︙

         id = self.catmap[cat]

         # page
         url = "http://www.shoutcast.com/Home/BrowseByGenre"
         params = { "genrename": cat }
         referer = None
         try:
-            json = http.get(url, params=params, referer=referer, post=1, ajax=1)
+            json = ahttp.get(url, params=params, referer=referer, post=1, ajax=1)
             json = json_decode(json)
         except:
             log.ERR("HTTP request or JSON decoding failed. Outdated python/requests perhaps.")
             return []
         self.parent.status(0.75)

         # remap JSON

︙
Modified channels/surfmusik.py from [c2116e521c] to [42aa02e156].
︙

 # TV stations don't seem to work mostly. And loading the webtv/ pages would
 # be somewhat slow (for querying the actual mms:// streams).
 #
 #
 #
 import re
-import ahttp as http
+import ahttp
 from config import *
 from channels import *

 # Surfmusik sharing site
 class surfmusik (ChannelPlugin):

︙

         """, re.X)
         r = []

         # Add main categories, and fetch subentries (genres or country names)
         for cat in cats[lang]:
             r.append(cat)
             if map.get(cat):
-                subcats = rx_links.findall( http.get(base_url + map[cat]) )
+                subcats = rx_links.findall( ahttp.get(base_url + map[cat]) )
                 subcats = [x.replace("+", " ").title() for x in subcats]
                 r.append(sorted(subcats))

         self.categories = r

     # summarize links from surfmusik

︙

             path = path_genre
         # country
         else:
             path = path_country

         if path is not None:
             ucat = cat.replace(" ", "+").lower()
-            html = http.get(base_url + path + ucat + ".html")
+            html = ahttp.get(base_url + path + ucat + ".html")
             html = re.sub("&#x?\d+;", "", html)

             rx_radio = re.compile(r"""
                 <td\s+class="home1"><a[^>]*\s+href="(.+?)"[^>]*> .*?
                 <a\s+class="navil"\s+href="([^"]+)"[^>]*>([^<>]+)</a></td>
                 <td\s+class="ort">(.*?)</td>.*?
                 <td\s+class="ort">(.*?)</td>.*?
             """, re.X|re.I)
             rx_video = re.compile(r"""
                 <a[^>]+href="([^"]+)"[^>]*>(?:<[^>]+>)*Externer
             """, re.X|re.I)

             # per-country list
             for uu in rx_radio.findall(html):
                 (url, homepage, name, genre, city) = uu

                 # find mms:// for webtv stations
                 if is_tv:
-                    m = rx_video.search(http.get(url))
+                    m = rx_video.search(ahttp.get(url))
                     if m:
                         url = m.group(1)
                 # just convert /radio/ into /m3u/ link
                 else:
                     url = "http://www.surfmusik.de/m3u/" + url[30:-5] + ".m3u"

                 entries.append({

︙
Modified channels/tunein.py from [0bca890c80] to [ed1a42d71d].
︙

 #
 import re
 import json
 from config import *
 from channels import *
-import ahttp as http
+import ahttp
 from xml.etree import ElementTree

 # TuneIn radio directory
 class tunein (ChannelPlugin):

     # control flags

︙

             })
         return r

     # Fetch OPML, convert outline elements to dicts
     def api(self, method):
         r = []
-        opml = http.get(self.base + method)
+        opml = ahttp.get(self.base + method)
         x = ElementTree.fromstring(opml)
         for outline in x.findall(".//outline"):
             r.append(dict(outline.items()))
         return r
Modified channels/xiph.py from [c02f4fea70] to [f7d2f8fd13].
︙

 #
 # The category list is hardwired in this plugin.
 #
 from config import *
 from uikit import uikit
-import ahttp as http
+import ahttp
 from channels import *
 #from xml.sax.saxutils import unescape as entity_decode, escape as xmlentities
 #import xml.dom.minidom
 import json
 import re

︙

         params = {}
         if cat:
             params["cat"] = cat.lower()
         if search:
             params["search"] = search

         #-- get data
-        data = http.get(self.json_url, params=params)
+        data = ahttp.get(self.json_url, params=params)
         #log.DATA(data)

         #-- extract
         l = []
         log.PROC( "processing api.dir.xiph.org JSON (via api.include-once.org cache)" )
         data = json.loads(data)
         for e in data:

︙
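
For reference, the call sites in the diffs above only touch two entry points of the project's `ahttp` helper: `ahttp.get()` and `ahttp.fix_url()`. The sketch below is a rough stand-in built from the keyword arguments visible at those call sites (`params`, `post`, `ajax`, `referer`, `feedback`); the `requests` dependency and the function bodies are assumptions for illustration, not the actual `ahttp` module from this repository:

    import re
    import requests

    def fix_url(url):
        # illustrative: prepend a scheme when missing, as the fix_url() call sites imply
        if url and not re.match(r"^\w+://", url):
            url = "http://" + url
        return url

    def get(url, params=None, post=0, ajax=0, referer=None, feedback=None):
        # illustrative: one GET/POST helper that returns the response body as text
        headers = {}
        if ajax:
            headers["X-Requested-With"] = "XMLHttpRequest"
        if referer:
            headers["Referer"] = referer
        if feedback:
            feedback(0.5)   # call sites pass a progress callback (e.g. self.parent.status)
        if post:
            r = requests.post(url, data=params, headers=headers)
        else:
            r = requests.get(url, params=params, headers=headers)
        return r.text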