Check-in [e9c2e9a3a2]
Overview
| Comment: | Apply more `self.status(i / pages)` progressbar calls for some channels. |
|---|---|
| Downloads: | Tarball | ZIP archive | SQL archive |
| Timelines: | family | ancestors | descendants | both | trunk |
| Files: | files | file ages | folders |
| SHA1: |
e9c2e9a3a2c7524f41de2a9e061189ee |
| User & Date: | mario on 2020-05-15 19:36:33 |
| Other Links: | manifest | tags |
Context
|
2020-05-16
| ||
| 10:07 | Documentation typos fixed and itemized more config options for some plugins. check-in: 04648f1c1c user: mario tags: trunk | |
|
2020-05-15
| ||
| 19:36 | Apply more `self.status(i / pages)` progressbar calls for some channels. check-in: e9c2e9a3a2 user: mario tags: trunk | |
| 19:00 | ui change title bar: use gtk.STOCK_DIRECTORY as fallback when no png: icon is present (bookmarks channel) check-in: 7aa5d1f8dd user: mario tags: trunk | |
Changes
Modified channels/peertube.py from [101375306a] to [fc7ec6bb9a].
| ︙ | |||
125 126 127 128 129 130 131 | 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 | - + - + | # INTERNA # # The /embed/ section of the url can sometimes be substituted with: # · /videos/watch/UUID # · /static/streaming-playlists/hls/UUID/master.m3u8 # · /static/webseed/UUID.mp4 # Though that's sometimes blocked / or not consistently supported on all instances. |
| ︙ | |||
169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 | 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 | + + |
"categoryOneOf": self.catmap[cat],
"count": 100,
"sort": "-publishedAt",
"nsfw": "false"
}
# fetch + map
self.status(0.9)
entries = []
for video in self.api("videos", params):
#log.DATA(video)
entries.append(self.map_data(video))
#log.EN(json.dumps(entries, indent=4))
self.status(1.0)
return entries
    # peertube entry to streamtuner2 dict
def map_data(self, v):
url = "http://" + v["channel"]["host"]
return dict(
uuid = v["uuid"],
|
| ︙ | |||
196 197 198 199 200 201 202 203 204 205 206 207 208 209 | 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 | + |
)
# fetch one or multiple pages from API
def api(self, method, params={}, debug=False, count=200, **kw):
r = []
for i in range(0, 5):
self.status(i / 6.0)
add = json.loads(
ahttp.get("http://{}/api/v1/{}".format(conf.peertube_srv, method), params, **kw)
)
if not add.get("data"):
return add
else:
r += add["data"]
|
| ︙ |
Modified channels/radiobrowser.py from [984872dde3] to [4c365e0c90].
| ︙ | |||
101 102 103 104 105 106 107 108 109 110 111 112 113 114 | 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 | + |
if search:
data = self.api(
"stations/search",
{"search": search, "limit": conf.max_streams}
)
# topclick, topvote
elif cat in self.pricat:
self.status(0.3)
data = self.api(
"stations/{}/{}".format(cat, conf.max_streams),
{"limit": conf.max_streams}
)
# empty category
#elif cat in ("tags", "countries", "languages"):
# return [
|
| ︙ | |||
125 126 127 128 129 130 131 132 133 134 135 136 137 138 | 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 | + |
"limit": conf.max_streams * 2
}
)
#data = self.api("stations/" + self.catmap[conf.radiobrowser_cat] + "/" + cat)
if len(data) >= 5000:
data = data[0:5000]
self.status(0.75)
r = []
for e in data:
r.append(dict(
genre = e["tags"],
url = e["url"],
format = mime_fmt(e["codec"]),
|
| ︙ | |||
164 165 166 167 168 169 170 | 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 | - + - + |
# callback for general stream play event
def click(self, row, channel):
if not channel == self:
return
# fetch uuid, then register click
|
| ︙ |
Modified channels/tunein.py from [3410153f13] to [e947db0e20].
| ︙ | |||
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 | 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 | + + + |
# Fetch OPML, convert outline elements to dicts
def api(self, method):
r = []
# fetch API page
next = self.base + method
max = int(conf.radiotime_maxpages)
i = 0.1
while next:
self.status(i / (12.5 + max * 0.7))
i += 1.1
opml = ahttp.get(next)
next = None
x = ElementTree.fromstring(opml)
# append entries
for outline in x.findall(".//outline"):
outline = dict(outline.items())
# additional pages
|
| ︙ |
Modified channels/xiph.py from [0800ed29b7] to [89973c3cb6].
| ︙ | |||
195 196 197 198 199 200 201 | 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 | - + - + |
elif by_format.get(cat):
url = "http://dir.xiph.org/codecs/{}".format(by_format[cat].title())
elif cat:
url = "http://dir.xiph.org/genres/{}".format(cat.title())
# Collect all result pages
html = ahttp.get(url)
|
| ︙ |
Modified contrib/radiolist.py from [531c228eeb] to [a64e8fb450].
| ︙ | |||
71 72 73 74 75 76 77 78 79 80 81 82 83 84 | 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 | + |
}
# extract stream urls
def update_streams(self, cat):
entries = []
html = ""
for i in range(1, int(int(conf.max_streams)/50)+1):
self.status(i / 11.0)
html = html + ahttp.get("http://radiolist.net/genre/{}?paginate={}".format(cat.lower(), i))
if not html.find('?paginate={}">Next'.format(i+1)) >= 0:
break
for block in re.findall(self.recipe["block"], html, re.S|re.M):
#log.HTML(block)
e = {"genre":"-", "playing":cat, "format":"audio/mpeg"}
for id,rx in self.recipe["fields"].iteritems():
|
| ︙ |
Modified contrib/radionet.py from [6f48b334fd] to [4a9edf7fc3].
| ︙ | |||
86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 | 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 | + + |
# Fetch entries
def update_streams(self, cat, search=None):
# category page, get key
urlcat = cat.replace(" ", "-").lower()
self.status(0.1)
html = ahttp.get(self.genre_url.format(urlcat))
for p in range(2, 4):
self.status(p / 5.5)
if html.find('?p={}"'.format(p)) >= 0:
html += ahttp.get(self.genre_url.format(urlcat) + "?p={}".format(p))
self.set_key(html)
r = []
        # prefetch images from embedded json (genres and location would also be sourceable from "playables":[…])
imgs = dict(re.findall('\],"id":"(\w+)","logo100x100":"(htt[^"]+)",', html))
|
| ︙ |
Modified contrib/rcast.py from [9529aa8abe] to [1b543a00d6].
| ︙ | |||
50 51 52 53 54 55 56 57 58 59 60 61 62 63 | 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 | + |
# fetch
html = ""
if search: # pretty much identical (except first page should be /dir/?action=search and POST field)
cat = search
max_pages = 1
for i in range(1, max_pages + 1):
self.status(i / 12.5)
html += ahttp.get("%s/%s/page%s" % (self.base, cat, i))
if not re.search('href="/dir/%s/page%s">Next' % (cat, i + 1), html):
break
# extract
ls = re.findall("""
<tr> .*?
|
| ︙ |