Check-in [fd4a1b208c]
Overview
| Comment: | Comment out dbg.DATA prints, add some statusbar updating calls. |
|---|---|
| Downloads: | Tarball | ZIP archive | SQL archive |
| Timelines: | family | ancestors | descendants | both | trunk |
| Files: | files | file ages | folders |
| SHA1: | fd4a1b208c00bdf37883add5943362b8 |
| User & Date: | mario on 2014-05-26 16:32:49 |
| Other Links: | manifest | tags |
Context
2014-05-26

| 19:59 | Retry regex after PyQuery extraction mode (or other way round). | check-in: 696a0ab060 user: mario tags: trunk |
| 16:32 | Comment out dbg.DATA prints, add some statusbar updating calls. | check-in: fd4a1b208c user: mario tags: trunk |
| 15:32 | Move status.progressbar init and cleanup into GenericChannel.load() | check-in: 8c1da4e0f7 user: mario tags: trunk |
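The changes below follow a single pattern across the channel plugins: verbose dbg.DATA payload dumps are commented out, and the per-channel fetch loops report their progress through self.parent.status() with a completion fraction. A minimal, self-contained sketch of that pattern, assuming status() takes a float between 0.0 and 1.0; the fetch loop, page count, and FakeParent stub are invented for illustration, and only the status() call shape and the commented-out dbg.DATA line mirror the diffs:

```python
# Sketch of the pattern applied in this check-in; everything except the
# parent.status(<fraction>) call and the commented-out dbg.DATA dump is invented.

class FakeParent:
    """Stand-in for the streamtuner2 main window; the real status() drives the statusbar/progressbar."""
    def status(self, fraction):
        print("progress %3d%%" % int(fraction * 100))

def fetch_pages(parent, pages=5):
    entries = []
    for page in range(pages):
        data = {"page": page}          # placeholder for one downloaded API/HTML page
        #__print__(dbg.DATA, data)     # noisy payload dump stays commented out
        entries.append(data)
        # statusbar updating call: completion fraction in 0.0 .. 1.0
        parent.status(float(page + 1) / float(pages))
    return entries

if __name__ == "__main__":
    fetch_pages(FakeParent())
```

The neighbouring check-in 8c1da4e0f7 (see Context) moved the progressbar init and cleanup into GenericChannel.load(), which is presumably why the plugins themselves only need to feed fractions here.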
Changes
Modified channels/internet_radio_org_uk.py from [d581ef46c2] to [865efc64e3].
︙
.*?
(\d+)\s*Kbps
(?:<br>(\d+)\s*Listeners)?
""", re.S|re.X)
#rx_homepage = re.compile('href="(http://[^"]+)"[^>]+target="_blank"')
rx_pages = re.compile('href="/stations/[-+\w%\d\s]+/page(\d+)">\d+</a>')
rx_numbers = re.compile("(\d+)")
︙
Modified channels/jamendo.py from [9a203fe500] to [75f35740e6].
︙
"title": e["name"],
"playing": e["user_name"],
"homepage": e["shareurl"],
#"url": "http://api.jamendo.com/v3.0/playlists/file?client_id=%s&id=%s" % (self.cid, e["id"]),
"url": "http://api.jamendo.com/get2/stream/track/xspf/?playlist_id=%s&n=all&order=random&from=app-%s" % (e["id"], self.cid),
"format": "application/xspf+xml",
})
self.parent.status(float(offset)/float(1000))
# albums
elif cat in ["albums", "newest"]:
for offset in self.retrieval_offsets():
data = http.get(self.api + "albums/musicinfo", params = {
"client_id": self.cid,
"format": "json",
︙
"playing": e["artist_name"],
"img": e["image"],
"homepage": e["shareurl"],
#"url": "http://api.jamendo.com/v3.0/playlists/file?client_id=%s&id=%s" % (self.cid, e["id"]),
"url": "http://api.jamendo.com/get2/stream/track/xspf/?album_id=%s&streamencoding=ogg2&n=all&from=app-%s" % (e["id"], self.cid),
"format": "application/xspf+xml",
})
self.parent.status(float(offset)/float(1000))
# genre list
else:
for offset in self.retrieval_offsets():
data = http.get(self.api + "tracks", params={
"client_id": self.cid,
("fuzzytags" if cat else "search"): (search if search else cat),
︙
"playing": e["album_name"] + " / " + e["artist_name"],
"img": e["album_image"],
"homepage": e["shareurl"],
#"url": e["audio"],
"url": "http://storage-new.newjamendo.com/?trackid=%s&format=ogg2&u=0&from=app-%s" % (e["id"], self.cid),
"format": fmt,
})
self.parent.status(float(offset)/float(1000))
# done
return entries
# offset list [0, 200, 400, 600, ...] according to max retrieval count
def retrieval_offsets(self):
︙
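In the three jamendo loops above, the fraction handed to self.parent.status() is the current retrieval offset divided by 1000, i.e. the cap that retrieval_offsets() paginates towards. A small sketch of how those two pieces relate, assuming a 200-entry page size and a 1000-entry cap as suggested by the "[0, 200, 400, 600, ...]" comment; the actual constants live elsewhere in channels/jamendo.py and are not shown in this hunk:

```python
# Illustration only: step and cap are read off the "[0, 200, 400, 600, ...]"
# comment above; the real values are defined elsewhere in channels/jamendo.py.
def retrieval_offsets(step=200, cap=1000):
    # offsets of the successive API pages: [0, 200, 400, 600, 800]
    return list(range(0, cap, step))

# fraction reported per page, matching float(offset)/float(1000) in the diff
fractions = [float(offset) / float(1000) for offset in retrieval_offsets()]
print(fractions)   # [0.0, 0.2, 0.4, 0.6, 0.8]
```

Because the fraction uses the page's starting offset, it never quite reaches 1.0; the progressbar cleanup that 8c1da4e0f7 moved into GenericChannel.load() presumably clears the bar once the channel finishes loading.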
Modified channels/live365.py from [cd7a2a9078] to [ee023b1309].
︙
=["']audioQuality.+?>(\d+)\w<.+?
>DrawListenerStars\((\d+),.+?
>DrawRatingStars\((\d+),\s+(\d+),.*?
""", re.X|re.I|re.S|re.M)
# src="(http://www.live365.com/.+?/stationlogo\w+.jpg)".+?
# append entries to result list
︙
Modified channels/modarchive.py from [7c7eb2da06] to [f8a31cf711].
︙
.*? /formats/(\w+).png"
.*? title="([^">]+)">([^<>]+)</a>
.*? >Rated</a>\s*(\d+)
""", re.X|re.S)
for uu in rx_mod.findall(html):
(url, id, fmt, title, file, rating) = uu
︙
Modified channels/shoutcast.py from [834f2c6518] to [2a36c113c8].
︙
if (next < max):
#/radiolist.cfm?action=sub&string=&cat=Oldies&_cf_containerId=radiolist&_cf_nodebug=true&_cf_nocache=true&_cf_rc=0
#/radiolist.cfm?start=19&action=sub&string=&cat=Oldies&amount=18&order=listeners
# page
url = "http://www.shoutcast.com/radiolist.cfm?action=sub&string=&cat="+ucat+"&order=listeners&amount="+str(count)
︙
"format": self.mime_fmt(div.find("td:eq(5)").text()),
"max": 0,
"genre": cat,
})
# display partial results (not strictly needed anymore, because we fetch just one page)
Modified channels/surfmusik.py from [6db3b80654] to [2563179fb4].
︙
"genre": genre,
"format": ("video/html" if is_tv else "audio/mpeg"),
})
# limit result list
if i > max:
break
if i % 10 == 0:
self.parent.status(float(i)/float(max+5))
i += 1
# done
return entries
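surfmusik walks individual result rows rather than API pages, so the new status call is throttled to every tenth row, and the max+5 denominator keeps the fraction just below 1.0 until the loop ends. A stand-alone approximation of that throttle; the row list, cap, and status() stub are invented, and only the modulo guard and the fraction mirror the diff:

```python
# Stand-alone approximation of the throttled statusbar update added above.
def status(fraction):                               # stub for self.parent.status()
    print("progress %.2f" % fraction)

rows = ["station %d" % n for n in range(40)]        # invented sample data
max_rows = 30                                       # mirrors the "max" result limit
for i, row in enumerate(rows):
    if i > max_rows:
        break                                       # limit result list
    if i % 10 == 0:                                 # only every 10th row touches the statusbar
        status(float(i) / float(max_rows + 5))
```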
Modified channels/xiph.py from [b7c1ca97af] to [921a2a9392].
︙
#__print__(dbg.DATA, data)
#-- extract
l = []
__print__( dbg.PROC, "processing api.dir.xiph.org JSON (via api.include-once.org cache)" )
data = json.loads(data)
for e in data.values():
︙
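The xiph hunk is the "comment out dbg.DATA prints" half of this check-in: the raw JSON payload dump goes quiet while the coarser dbg.PROC progress message stays active. A toy stand-in for that two-level debug convention; the real __print__ helper and dbg categories live in streamtuner2's config module, and the category values and filtering rule below are invented:

```python
# Toy model of the __print__/dbg convention; only the call shape matches the code above.
class dbg:
    PROC = "proc"   # high-level processing steps, normally shown
    DATA = "data"   # full payload dumps, too noisy for normal runs

ENABLED = {dbg.PROC}        # invented filter; the real mechanism differs

def __print__(category, *args):
    if category in ENABLED:
        print("[%s] %s" % (category, " ".join(str(a) for a in args)))

data = {"example": "payload"}
#__print__(dbg.DATA, data)  # stays commented out, as in the check-in
__print__(dbg.PROC, "processing api.dir.xiph.org JSON (via api.include-once.org cache)")
```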
Modified channels/youtube.py from [068cf22437] to [26bc9c21d4].
︙
data["description"] = row["snippet"]["description"],
return data
# API version 2.0s jsonified XML needs different unpacking:
def wrap2(self, row):
|