#
import re
from config import *
from channels import *
import ahttp


# UU Wiki radio list
class ubuntuusers (ChannelPlugin):

    # description
    has_search = False
    listformat = "srv"
    titles = dict(playing=False, listeners=False, bitrate=False)
    base = "http://wiki.ubuntuusers.de/Internetradio/Stationen?action=export&format=raw"
    categories = ["stations"]

    # Nope
    def update_categories(self):
        pass

    # Fetches wiki page, extracts from raw markup.
    # Which has a coherent formatting of entries like:
    #
    #  == Pi-Radio (Berlin) ==
    #  [http://www.piradio.de] {de}
    #  {{{
    #  http://ice.rosebud-media.de:8000/88vier-ogg1.ogg
    #  }}}
    #
    def update_streams(self, cat, search=None):

        # fetch page
        wiki = ahttp.get(self.base)

        # regexp lists out, just one srv url per entry
        ls = re.findall(r"""
            ^==\s*([\w\s.-]+)\s*==\s+
            ^\[(http[^\s\]]+).*?\{(\w+)\}\s+
            ^\{\{\{\s+
            ^(http\S+)
        """, wiki, re.X|re.S|re.M)

        # pack into row list
        return [
            dict(genre=g, title=t, url=u, homepage=h, bitrate=0, listeners=0)
            for t,h,g,u in ls
        ]
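
# Hedged standalone sketch (not part of the plugin): it merely replays the
# findall pattern above against a sample entry in the documented wiki format,
# to show what one extracted tuple looks like. The `sample` text and this
# __main__ harness are illustrative assumptions. Note that the title class
# [\w\s.-] does not cover parentheses, so the literal headline
# "Pi-Radio (Berlin)" from the comment would not match; the sample therefore
# uses a plain "Pi-Radio" headline.
if __name__ == "__main__":
    import re
    sample = (
        "== Pi-Radio ==\n"
        "[http://www.piradio.de] {de}\n"
        "{{{\n"
        "http://ice.rosebud-media.de:8000/88vier-ogg1.ogg\n"
        "}}}\n"
    )
    ls = re.findall(r"""
        ^==\s*([\w\s.-]+)\s*==\s+
        ^\[(http[^\s\]]+).*?\{(\w+)\}\s+
        ^\{\{\{\s+
        ^(http\S+)
    """, sample, re.X|re.S|re.M)
    # expect one (title, homepage, genre, url) tuple for the sample entry
    print(ls)
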
#
import re
from config import *
from channels import *
import ahttp
import itertools


# UU Wiki radio list
class ubuntuusers (ChannelPlugin):

    # description
    has_search = False
    listformat = "srv"
    titles = dict(playing=False, listeners=False, bitrate=False)
    base = {
        "stations": "http://wiki.ubuntuusers.de/Internetradio/Stationen?action=export&format=raw",
        "tv": "http://wiki.ubuntuusers.de/Internet-TV/Stationen?action=export&format=raw",
    }
    categories = ["stations", "tv"]

    # Nope
    def update_categories(self):
        pass

    # Fetches wiki page, extracts from raw markup.
    # Which has a coherent formatting of entries like:
    #
    #  == Pi-Radio (Berlin) ==
    #  [http://www.piradio.de] {de}
    #  {{{
    #  http://ice.rosebud-media.de:8000/88vier-ogg1.ogg
    #  }}}
    #
    def update_streams(self, cat, search=None):

        # fetch page
        wiki = ahttp.get(self.base[cat])
        f = "audio/mpeg" if cat == "stations" else "video/mp4"

        # split on headlines, then flatten each section's rows into one list
        # (chain.from_iterable, so the result is a flat list of dicts)
        return list(itertools.chain.from_iterable(
            self.join(src, f) for src in re.split("^==+", wiki, 0, re.M)
        ))

    # Extract individual stations from one wiki section
    def join(self, src, f):

        # regexp lists out, just one srv url per entry
        # (title class includes parentheses so headlines like
        #  "Pi-Radio (Berlin)" from the sample above match)
        ls = re.findall(r"""
            ^\s*([\w\s().-]+)\s*==+\s+
            (?: ^\[(http[^\s\]]+) .*? \{(\w+)\} )?
            .*?
            ^\{\{\{
            .*?
            (\w+://[^"'\s]+)
        """, src, re.X|re.S|re.M)

        # pack into row list
        return [
            dict(genre=g, title=t, url=u, homepage=h, bitrate=0,
                 listeners=0, format=f, listformat="href")
            for t,h,g,u in ls
        ]
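
# Hedged standalone sketch (not part of the plugin): it replays the same
# split-on-headlines plus per-section findall idea on the sample entry quoted
# in the comment above. The `wiki` text and this __main__ harness are
# illustrative assumptions only.
if __name__ == "__main__":
    import re, itertools
    wiki = (
        "== Pi-Radio (Berlin) ==\n"
        "[http://www.piradio.de] {de}\n"
        "{{{\n"
        "http://ice.rosebud-media.de:8000/88vier-ogg1.ogg\n"
        "}}}\n"
    )
    rx = re.compile(r"""
        ^\s*([\w\s().-]+)\s*==+\s+
        (?: ^\[(http[^\s\]]+) .*? \{(\w+)\} )?
        .*?
        ^\{\{\{
        .*?
        (\w+://[^"'\s]+)
    """, re.X|re.S|re.M)
    rows = list(itertools.chain.from_iterable(
        rx.findall(src) for src in re.split("^==+", wiki, flags=re.M)
    ))
    # expect one (title, homepage, genre, url) tuple for the "Pi-Radio (Berlin)" entry
    print(rows)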