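        # Fetch the genre listing; if the page links to further pages (?p=2, ?p=3), append those too.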
html = ahttp.get(self.genre_url.format(urlcat))
for p in range(2, 4):
self.status(p / 5.5)
if html.find('?p={}"'.format(p)) >= 0:
html += ahttp.get(self.genre_url.format(urlcat) + "?p={}".format(p))
self.set_key(html)
r = []
# fetch JSON
        ls_json = re.findall(r"__CONTEXT_PROVIDER__\s*=\s*(\{.+\});", html)
if ls_json:
try:
return self.from_json(ls_json)
            except Exception:
log.error("JSON extraction failed", traceback.format_exc())
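        # When present, the embedded JSON carries the station list under
        # data → stations → playables; each entry resembles the NDR 2 record
        # quoted in the sample below (id, name, city, country, genres, logo100x100, ...).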
# prefetch images from embedded json (genres and location would also be sourceable from "playables":[…])
        imgs = dict(re.findall(r'\],"id":"(\w+)","logo100x100":"(htt[^"]+)",', html))
#log.DATA(imgs)
# top 100 of the most horrible html serializations
"""
        <div class="sc-1crnfmg-11 sc-1crnfmg-12 cYzyuZ"><a href="/s/kissfmuk">
          <div class="sc-1crnfmg-8 cEmqZI">
            <div class="sc-1crnfmg-0 iDuFwr">
              <div class="lazyload-placeholder"></div>
              <div class="sc-1crnfmg-1 ezaTdn">
                <svg class="sc-1crnfmg-9 hQgRat" xmlns="http://www.w3.org/2000/svg" viewbox="0 0 32 32"><path d="M4 32l25.26-16L4 0z"></path></svg>
              </div>
            </div>
            <div class="sc-1crnfmg-2 incVhC">
              <div class="sc-1crnfmg-3 chXIOx">KISS FM UK</div>
              <div class="sc-1crnfmg-5 fitBQg">London, <!-- -->United Kingdom<!-- --> / Hits, Pop, R'n'B</div>
              <div class="sc-1crnfmg-6 jaxcgd"></div>
            </div>
          </div>
        </a></div>

        plus a station record from the embedded JSON:

        {"city":"Hanover","country":"Germany","genres":["Pop","80s","Top 40 & Charts"],"id":"ndr2","logo100x100":"https://d3kle7qwymxpcy.cloudfront.net/images/broadcasts/02/33/2262/1/c100.png","logo300x300":"https://d3kle7qwymxpcy.cloudfront.net/images/broadcasts/02/33/2262/1/c300.png","logo630x630":"","name":"NDR 2","type":"STATION"}
"""
        rx = re.compile(r"""
            <a\s+[^>]*\bhref="(?:https?:)?(?://(?:[\w-]+)\.radio\.net)?/s/([^"]+)/?"> .*?
            <div[^>]+> (\w[^<]+) </div> \s*
            <div[^>]+> (\w[^/]+) \s+ / \s+ (\w.+?)</div>
        """, re.X|re.S)
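        # capture groups: 1 = station slug from the href, 2 = station name,
        # 3 = "City, Country" part, 4 = genre list after the slash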
# extract text fields
for d in re.findall(rx, html):
#log.DATA_ROW(d)
href, title, location, desc = d
# refurbish extracted strings
r.append(dict(
name = href,
genre = unhtml(desc),
title = unhtml(title),
playing = unhtml(location),
url = "urn:radionet:"+href,
homepage = "http://www.radio.net/s/{}".format(href),
img = imgs.get(href, "https://www.radio.net/favicon.ico"),
            ))
return r
# process json
def from_json(self, ls_json):
ls = []
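        # one __CONTEXT_PROVIDER__ blob per fetched page; merge their playables lists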
for js in ls_json:
ls += json.loads(js)["data"]["stations"]["playables"]
r = []
for row in ls:
href = row["id"]
r.append(dict(
name = href,
title = row["name"],
genre = ",".join(row["genres"]),
url = "urn:radionet:"+href,
playing = row.get("city", row.get("country", "-")),
homepage = "http://www.radio.net/s/{}".format(href),
img = row["logo100x100"],
))
            #log.DATA(row)
return r
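        # Each row produced here (and by the HTML fallback above) looks like,
        # using the NDR 2 sample for illustration:
        #   {"name": "ndr2", "title": "NDR 2", "genre": "Pop,80s,Top 40 & Charts",
        #    "playing": "Hanover", "url": "urn:radionet:ndr2",
        #    "homepage": "http://www.radio.net/s/ndr2",
        #    "img": "https://d3kle7qwymxpcy.cloudfront.net/images/broadcasts/02/33/2262/1/c100.png"}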
    # The API search is gone; the streamUrl now has to be fetched from each station's homepage.
def resolve_urn(self, row):
if row.get("url", "-").find("urn:radionet:") != 0:
return
html = ahttp.get(row["homepage"])
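        # The homepage embeds the streamUrl in its own JSON blob; loosely match the first
        # "streams":[{"url":"..."}] entry to recover a playable stream URL.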
        stream = re.findall(r'"stream[s:[{"\s]+url"[\s:]+"([^"]+)"', html, re.S|re.I)