Internet radio browser GUI for music/video streams from various directory services.

branch: streamtuner2


Check-in [e9c2e9a3a2]

Overview
Comment: Apply more `self.status(i / pages)` progressbar calls for some channels.
SHA1: e9c2e9a3a2c7524f41de2a9e061189eeb7c380ac
User & Date: mario on 2020-05-15 19:36:33
Context
2020-05-16 10:07  Documentation typos fixed and itemized more config options for some plugins. check-in: 04648f1c1c user: mario tags: trunk
2020-05-15 19:36  Apply more `self.status(i / pages)` progressbar calls for some channels. check-in: e9c2e9a3a2 user: mario tags: trunk
2020-05-15 19:00  ui change title bar: use gtk.STOCK_DIRECTORY as fallback when no png: icon is present (bookmarks channel) check-in: 7aa5d1f8dd user: mario tags: trunk
Changes
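
All of the hunks below share one pattern: report paging progress through the channel's status() method as a fraction between 0 and 1. A minimal sketch of that pattern, assuming nothing beyond what the diffs show (fetch_page() is a hypothetical stand-in for each plugin's per-page retrieval):

    # Sketch only, not from the check-in. self.status() receives a 0..1
    # progressbar fraction; fetch_page() is a hypothetical helper.
    class somechannel (ChannelPlugin):
        def update_streams(self, cat):
            entries = []
            pages = 5                              # known or configured page count
            for i in range(pages):
                self.status(i / float(pages + 1))  # stays below 1.0 while paging
                entries += self.fetch_page(cat, i) # hypothetical per-page fetch
            self.status(1.0)                       # signal completion
            return entries

The float denominators in the actual calls (6.0, 5.5, 11.0, 11.1, 12.5) keep the division from truncating to zero under Python 2, which several of these plugins still target.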

Modified channels/peertube.py from [101375306a] to [fc7ec6bb9a].

@@ -125,20 +125,20 @@
 # INTERNA
 #
 # The /embed/ section of the url can sometimes be substituted with:
 #  · /videos/watch/UUID
 #  · /static/streaming-playlists/hls/UUID/master.m3u8
 #  · /static/webseed/UUID.mp4
 # Though that's sometimes blocked / or not consistently supported on all instances.
-# Which is why resoslve_urn does an extra /api/v1/videos/uuid lookup.
+# Which is why resolve_urn does an extra /api/v1/videos/uuid lookup.
 #
 class peertube (ChannelPlugin):
 
     # control attributes
-    listformat = "srv"
+    listformat = "href"
     has_search = True
     audioformat = "video/youtube"
     titles = dict( genre="Channel", title="Title", playing="Description", bitrate=False, listeners=False )
     srv = conf.peertube_srv
     image_resize = 48
     fixed_size = [48,32]
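
A minimal sketch of the extra lookup the resolve_urn comment describes, assuming the files / streamingPlaylists fields of PeerTube's /api/v1/videos/{id} response and the text-returning ahttp.get() helper used elsewhere in this file (resolve_uuid and its fallback order are hypothetical):

    import json

    def resolve_uuid(host, uuid):
        # Query the instance for the video's stream descriptors.
        data = json.loads(ahttp.get("http://{}/api/v1/videos/{}".format(host, uuid)))
        # Prefer a direct webseed file, else fall back to an HLS playlist.
        for f in data.get("files") or []:
            return f["fileUrl"]
        for pl in data.get("streamingPlaylists") or []:
            return pl["playlistUrl"]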

@@ -169,19 +169,21 @@
                 "categoryOneOf": self.catmap[cat],
                 "count": 100,
                 "sort": "-publishedAt",
                 "nsfw": "false"
             }
 
         # fetch + map
+        self.status(0.9)
         entries = []
         for video in self.api("videos", params):
             #log.DATA(video)
             entries.append(self.map_data(video))
         #log.EN(json.dumps(entries, indent=4))
+        self.status(1.0)
         return entries
 
     # peertube entry to streamtunter2 dict
     def map_data(self, v):
         url = "http://" + v["channel"]["host"]
         return dict(
             uuid = v["uuid"],

@@ -196,14 +198,15 @@
         )
 
 
     # fetch one or multiple pages from API
     def api(self, method, params={}, debug=False, count=200, **kw):
         r = []
         for i in range(0, 5):
+            self.status(i / 6.0)
             add = json.loads(
                 ahttp.get("http://{}/api/v1/{}".format(conf.peertube_srv, method), params, **kw)
             )
             if not add.get("data"):
                 return add
             else:
                 r += add["data"]
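
Inside api() the fraction i / 6.0 tops out at 4/6 ≈ 0.67 for the up-to-five page fetches. Note the ordering in update_streams() above: self.status(0.9) runs before the self.api() call in the for statement is evaluated, so the bar briefly falls back to the api() fractions before the final self.status(1.0).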

Modified channels/radiobrowser.py from [984872dde3] to [4c365e0c90].

@@ -101,14 +101,15 @@
         if search:
             data = self.api(
                 "stations/search",
                 {"search": search, "limit": conf.max_streams}
             )
         # topclick, topvote
         elif cat in self.pricat:
+            self.status(0.3)
             data = self.api(
                 "stations/{}/{}".format(cat, conf.max_streams),
                 {"limit": conf.max_streams}
             )
         # empty category
         #elif cat in ("tags", "countries", "languages"):
         #    return [

@@ -125,14 +126,15 @@
                     "limit": conf.max_streams * 2
                 }
             )
             #data = self.api("stations/" + self.catmap[conf.radiobrowser_cat] + "/" + cat)
 
         if len(data) >= 5000:
             data = data[0:5000]
+        self.status(0.75)
 
         r = []
         for e in data:
             r.append(dict(
                 genre = e["tags"],
                 url = e["url"],
                 format = mime_fmt(e["codec"]),
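
The two fixed ticks divide radiobrowser's update into visible phases: 0.3 as the top-list request is about to go out, 0.75 once the result has been capped at 5000 rows, with the remaining quarter left for the per-entry mapping loop below.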

@@ -164,19 +166,19 @@
 
 
     # callback for general stream play event
     def click(self, row, channel):
         if not channel == self:
             return
         # fetch uuid, then register click
-        uuid = self.api("stations/byurl", {"url": row.get("url")})
+        uuid = self.api("stations/byurl", {"url": row.get("url")}, quieter=1)
         if uuid:
             if isinstance(uuid, list): # just vote on the first entry
                 uuid = uuid[0]
-            log.PROC_CLICK_COUNT(self.api("url/{}".format(uuid["stationuuid"])))
+            log.CLICK(self.api("url/{}".format(uuid["stationuuid"], quieter=1)))
 
 
     # Add radio station to RBI
     def submit(self, *w):
         cn = self.parent.channel()
         row = cn.row()
         # convert row from channel
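
The quieter=1 flag travels through api()'s keyword arguments into ahttp.get(); going by the name it presumably suppresses error noise for this background click-registration call, which should fail silently. In the second call, though, the keyword ends up inside str.format() rather than self.api(); str.format() silently ignores unused keyword arguments, so it has no effect there.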

Modified channels/tunein.py from [3410153f13] to [e947db0e20].

@@ -87,15 +87,18 @@
 
     # Fetch OPML, convert outline elements to dicts
     def api(self, method):
         r = []
         # fetch API page
         next = self.base + method
         max = int(conf.radiotime_maxpages)
+        i = 0.1
         while next:
+            self.status(i / (12.5 + max * 0.7))
+            i += 1.1
             opml = ahttp.get(next)
             next = None
             x = ElementTree.fromstring(opml)
             # append entries
             for outline in x.findall(".//outline"):
                 outline = dict(outline.items())
                 # additional pages
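
The denominator 12.5 + max * 0.7 scales with the configured page cap: with radiotime_maxpages = 10 the counter climbs 0.1, 1.2, ... up to 10.0 across ten pages, so the reported fraction peaks near 10.0 / 19.5 ≈ 0.51 and leaves headroom for later processing.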

Modified channels/xiph.py from [0800ed29b7] to [89973c3cb6].

@@ -195,19 +195,19 @@
       elif by_format.get(cat):
           url = "http://dir.xiph.org/codecs/{}".format(by_format[cat].title())
       elif cat:
           url = "http://dir.xiph.org/genres/{}".format(cat.title())
 
       # Collect all result pages
       html = ahttp.get(url)
-      for i in range(1,9):
+      for i in range(1, 9):
           m = re.search('href="[?]cursor=(\w+)">Next</a>', html)
           if not m:
               break
-          self.status(i/5.1)
+          self.status(i / 11.1)
           html += ahttp.get(url, {"cursor": m.group(1)})
       try:
           html = html.encode("raw_unicode_escape").decode("utf-8")
       except:
           pass
 
       # Find streams
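
The divisor change from 5.1 to 11.1 tracks the loop bound: range(1, 9) can report i as high as 8, and 8 / 5.1 ≈ 1.57 would overflow a 0..1 progressbar, whereas 8 / 11.1 ≈ 0.72 stays within it.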

Modified contrib/radiolist.py from [531c228eeb] to [a64e8fb450].

@@ -71,14 +71,15 @@
     }
 
     # extract stream urls
     def update_streams(self, cat):
         entries = []
         html = ""
         for i in range(1, int(int(conf.max_streams)/50)+1):
+            self.status(i / 11.0)
             html = html + ahttp.get("http://radiolist.net/genre/{}?paginate={}".format(cat.lower(), i))
             if not html.find('?paginate={}">Next'.format(i+1)) >= 0:
                 break
         for block in re.findall(self.recipe["block"], html, re.S|re.M):
             #log.HTML(block)
             e = {"genre":"-", "playing":cat, "format":"audio/mpeg"}
             for id,rx in self.recipe["fields"].iteritems():
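
The float literal matters here in particular: this plugin is still Python 2 code (note the .iteritems() call above), where i / 11 would be integer division and stick at 0 for the first ten pages. The loop bound is max_streams/50 pages, so e.g. with conf.max_streams = 500 the fraction climbs to at most 10 / 11.0 ≈ 0.91.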

Modified contrib/radionet.py from [6f48b334fd] to [4a9edf7fc3].

@@ -86,16 +86,18 @@
 
 
     # Fetch entries
     def update_streams(self, cat, search=None):
 
         # category page, get key
         urlcat = cat.replace(" ", "-").lower()
+        self.status(0.1)
         html = ahttp.get(self.genre_url.format(urlcat))
         for p in range(2, 4):
+            self.status(p / 5.5)
             if html.find('?p={}"'.format(p)) >= 0:
                 html += ahttp.get(self.genre_url.format(urlcat) + "?p={}".format(p))
         self.set_key(html)
         r = []
 
         # prefetch images from embedded json (genres and location would also be sourceable from "playables":[…])
         imgs = dict(re.findall('\],"id":"(\w+)","logo100x100":"(htt[^"]+)",', html))
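
Here the 0.1 tick fires before the first request, giving immediate feedback that the fetch has started; the optional follow-up pages p = 2..3 then advance the bar to p / 5.5 ≈ 0.36 and 0.55 before set_key() and the mapping stage run.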

Modified contrib/rcast.py from [9529aa8abe] to [1b543a00d6].

@@ -50,14 +50,15 @@
 
         # fetch
         html = ""
         if search: # pretty much identical (except first page should be /dir/?action=search and POST field)
             cat = search
             max_pages = 1
         for i in range(1, max_pages + 1):
+            self.status(i / 12.5)
             html += ahttp.get("%s/%s/page%s" % (self.base, cat, i))
             if not re.search('href="/dir/%s/page%s">Next' % (cat, i + 1), html):
                 break
 
         # extract
         ls = re.findall("""
            <tr> .*?
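
Since the search branch forces max_pages = 1, a search run reports just a single 1 / 12.5 = 0.08 tick before extraction; for category listings the same divisor keeps up to twelve pages of fetching below the 1.0 ceiling (12 / 12.5 = 0.96).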