Internet radio browser GUI for music/video streams from various directory services.

Branch: streamtuner2


Check-in [fd4a1b208c]

Overview
Comment: Comment out dbg.DATA prints, add some statusbar updating calls.
SHA1: fd4a1b208c00bdf37883add5943362b812898ded
User & Date: mario on 2014-05-26 16:32:49
Context
2014-05-26
19:59  Retry regex after PyQuery extraction mode (or other way round). check-in: 696a0ab060 user: mario tags: trunk
16:32  Comment out dbg.DATA prints, add some statusbar updating calls. check-in: fd4a1b208c user: mario tags: trunk
15:32  Move status.progressbar init and cleanup into GenericChannel.load() check-in: 8c1da4e0f7 user: mario tags: trunk
Changes

Modified channels/internet_radio_org_uk.py from [d581ef46c2] to [865efc64e3].

Before (lines 80-100):
		.*?
                (\d+)\s*Kbps
                (?:<br>(\d+)\s*Listeners)?
        """, re.S|re.X)
        #rx_homepage = re.compile('href="(http://[^"]+)"[^>]+target="_blank"')
        rx_pages = re.compile('href="/stations/[-+\w%\d\s]+/page(\d+)">\d+</a>')
        rx_numbers = re.compile("(\d+)")
        self.parent.status("downloading category pages...")


        # multiple pages

        page = 1
        max = int(conf.internetradio_max_pages)
        max = (max if max > 1 else 1)
        while page <= max:
        
            # fetch
            html = http.get(self.homepage + "stations/" + cat.lower().replace(" ", "%20") + "/" + ("page"+str(page) if page>1 else ""))


            # regex parsing?

After (lines 80-98):
		.*?
                (\d+)\s*Kbps
                (?:<br>(\d+)\s*Listeners)?
        """, re.S|re.X)
        #rx_homepage = re.compile('href="(http://[^"]+)"[^>]+target="_blank"')
        rx_pages = re.compile('href="/stations/[-+\w%\d\s]+/page(\d+)">\d+</a>')
        rx_numbers = re.compile("(\d+)")



        # multiple pages
        max = max(int(conf.internetradio_max_pages), 1)
        page = 1


        while page <= max:
        
            # fetch
            html = http.get(self.homepage + "stations/" + cat.lower().replace(" ", "%20") + "/" + ("page"+str(page) if page>1 else ""))


            # regex parsing?

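The internet_radio_org_uk.py hunk above reshuffles the pagination setup: the configured page limit is clamped to at least one page before the fetch loop starts. A small, self-contained sketch of the per-page URL construction the loop relies on, with illustrative names and example values rather than the channel's actual method:

    def category_page_urls(homepage, cat, max_pages):
        """Yield one listing URL per result page of a station category."""
        pages = max(int(max_pages), 1)          # never fewer than one page
        base = homepage + "stations/" + cat.lower().replace(" ", "%20") + "/"
        for page in range(1, pages + 1):
            # the first page has no "pageN" suffix, later pages do
            yield base + ("page%d" % page if page > 1 else "")

    # e.g. list(category_page_urls("http://www.example.org/", "Top 40", 3))
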
Modified channels/jamendo.py from [9a203fe500] to [75f35740e6].

Before (lines 134-147):
                        "title": e["name"],
                        "playing": e["user_name"],
                        "homepage": e["shareurl"],
                        #"url": "http://api.jamendo.com/v3.0/playlists/file?client_id=%s&id=%s" % (self.cid, e["id"]),
                        "url": "http://api.jamendo.com/get2/stream/track/xspf/?playlist_id=%s&n=all&order=random&from=app-%s" % (e["id"], self.cid),
                        "format": "application/xspf+xml",
                    })


        # albums
        elif cat in ["albums", "newest"]:
            for offset in self.retrieval_offsets():
                data = http.get(self.api + "albums/musicinfo", params = {
                    "client_id": self.cid,
                    "format": "json",

After (lines 134-148):
                        "title": e["name"],
                        "playing": e["user_name"],
                        "homepage": e["shareurl"],
                        #"url": "http://api.jamendo.com/v3.0/playlists/file?client_id=%s&id=%s" % (self.cid, e["id"]),
                        "url": "http://api.jamendo.com/get2/stream/track/xspf/?playlist_id=%s&n=all&order=random&from=app-%s" % (e["id"], self.cid),
                        "format": "application/xspf+xml",
                    })
                self.parent.status(float(offset)/float(1000))

        # albums
        elif cat in ["albums", "newest"]:
            for offset in self.retrieval_offsets():
                data = http.get(self.api + "albums/musicinfo", params = {
                    "client_id": self.cid,
                    "format": "json",

Before (lines 158-171):
                        "playing": e["artist_name"],
                        "img": e["image"],
                        "homepage": e["shareurl"],
                        #"url": "http://api.jamendo.com/v3.0/playlists/file?client_id=%s&id=%s" % (self.cid, e["id"]),
                        "url": "http://api.jamendo.com/get2/stream/track/xspf/?album_id=%s&streamencoding=ogg2&n=all&from=app-%s" % (e["id"], self.cid),
                        "format": "application/xspf+xml",
                    })

		
        # genre list
        else:
            for offset in self.retrieval_offsets():
                data = http.get(self.api + "tracks", params={
                    "client_id": self.cid,
                    ("fuzzytags" if cat else "search"): (search if search else cat),

After (lines 159-173):
                        "playing": e["artist_name"],
                        "img": e["image"],
                        "homepage": e["shareurl"],
                        #"url": "http://api.jamendo.com/v3.0/playlists/file?client_id=%s&id=%s" % (self.cid, e["id"]),
                        "url": "http://api.jamendo.com/get2/stream/track/xspf/?album_id=%s&streamencoding=ogg2&n=all&from=app-%s" % (e["id"], self.cid),
                        "format": "application/xspf+xml",
                    })
                self.parent.status(float(offset)/float(1000))
		
        # genre list
        else:
            for offset in self.retrieval_offsets():
                data = http.get(self.api + "tracks", params={
                    "client_id": self.cid,
                    ("fuzzytags" if cat else "search"): (search if search else cat),

Before (lines 186-199):
                        "playing": e["album_name"] + " / " + e["artist_name"],
                        "img": e["album_image"],
                        "homepage": e["shareurl"],
                        #"url": e["audio"],
                        "url": "http://storage-new.newjamendo.com/?trackid=%s&format=ogg2&u=0&from=app-%s" % (e["id"], self.cid),
                        "format": fmt,
                    })

 
        # done    
        return entries

    
    # offset list [0, 200, 400, 600, ...] according to max retrieval count
    def retrieval_offsets(self):

After (lines 188-202):
                        "playing": e["album_name"] + " / " + e["artist_name"],
                        "img": e["album_image"],
                        "homepage": e["shareurl"],
                        #"url": e["audio"],
                        "url": "http://storage-new.newjamendo.com/?trackid=%s&format=ogg2&u=0&from=app-%s" % (e["id"], self.cid),
                        "format": fmt,
                    })
                self.parent.status(float(offset)/float(1000))
 
        # done    
        return entries

    
    # offset list [0, 200, 400, 600, ...] according to max retrieval count
    def retrieval_offsets(self):

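The statusbar calls added in all three jamendo.py hunks report the current retrieval offset as a fraction of an assumed 1000-entry ceiling, in line with the "# offset list [0, 200, 400, 600, ...]" comment above retrieval_offsets(). A rough stand-in for that idea, with illustrative names and a hard-coded step size rather than the channel's actual implementation:

    def retrieval_offsets(max_count=1000, step=200):
        """Offset list [0, 200, 400, ...] up to the retrieval ceiling."""
        return list(range(0, max_count, step))

    def fetch_all(fetch_batch, report=lambda fraction: None, max_count=1000):
        entries = []
        for offset in retrieval_offsets(max_count):
            entries += fetch_batch(offset)              # one API page per offset
            report(float(offset) / float(max_count))    # mirrors self.parent.status(offset/1000)
        return entries
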
Modified channels/live365.py from [cd7a2a9078] to [ee023b1309].

Before (lines 118-135):
            =["']audioQuality.+?>(\d+)\w<.+?
            >DrawListenerStars\((\d+),.+?
            >DrawRatingStars\((\d+),\s+(\d+),.*?
                """, re.X|re.I|re.S|re.M)
#            src="(http://www.live365.com/.+?/stationlogo\w+.jpg)".+?

            # append entries to result list
            __print__( dbg.DATA, html )
            ls = []
            for row in rx.findall(html):
                __print__( dbg.DATA, row )
                points = int(row[8])
                count = int(row[9])
                ls.append({
                    "launch_id": row[0],
                    "sofo": row[0],  # subscribe-or-fuck-off status flags
                    "state":  (""  if  row[0]=="OK"  else  gtk.STOCK_STOP),
                    "homepage": entity_decode(row[1]),

After (lines 118-135):
            =["']audioQuality.+?>(\d+)\w<.+?
            >DrawListenerStars\((\d+),.+?
            >DrawRatingStars\((\d+),\s+(\d+),.*?
                """, re.X|re.I|re.S|re.M)
#            src="(http://www.live365.com/.+?/stationlogo\w+.jpg)".+?

            # append entries to result list
            #__print__( dbg.DATA, html )
            ls = []
            for row in rx.findall(html):
                #__print__( dbg.DATA, row )
                points = int(row[8])
                count = int(row[9])
                ls.append({
                    "launch_id": row[0],
                    "sofo": row[0],  # subscribe-or-fuck-off status flags
                    "state":  (""  if  row[0]=="OK"  else  gtk.STOCK_STOP),
                    "homepage": entity_decode(row[1]),

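The remaining files in this check-in get the same treatment as live365.py above: their per-row dbg.DATA dumps are commented out while the other debug categories stay active. The category names (DATA, PROC, HTTP, ERR) all appear in these channel modules; the stand-in below only illustrates the idea of a categorized debug printer and is not streamtuner2's actual __print__/dbg implementation:

    class dbg:
        # category labels as used in the channel modules
        DATA = "DATA"   # noisy per-row dumps (the ones silenced in this check-in)
        PROC = "PROC"   # processing steps
        HTTP = "HTTP"   # requested URLs
        ERR  = "ERR"    # caught exceptions

    def debug_print(category, *args):
        """Simplified stand-in: prefix debug output with its category label."""
        print("[%s]" % category, *args)
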
Modified channels/modarchive.py from [7c7eb2da06] to [f8a31cf711].

Before (lines 111-125):
            .*?    /formats/(\w+).png"
            .*?    title="([^">]+)">([^<>]+)</a>
            .*?    >Rated</a>\s*(\d+)
        """, re.X|re.S)
        
        for uu in rx_mod.findall(html):
            (url, id, fmt, title, file, rating) = uu
            __print__( dbg.DATA, uu )
            entries.append({
                "genre": cat,
                "url": url,
                "id": id,
                "format": self.mime_fmt(fmt) + "+zip",
                "title": title,
                "playing": file,

After (lines 111-125):
            .*?    /formats/(\w+).png"
            .*?    title="([^">]+)">([^<>]+)</a>
            .*?    >Rated</a>\s*(\d+)
        """, re.X|re.S)
        
        for uu in rx_mod.findall(html):
            (url, id, fmt, title, file, rating) = uu
            #__print__( dbg.DATA, uu )
            entries.append({
                "genre": cat,
                "url": url,
                "id": id,
                "format": self.mime_fmt(fmt) + "+zip",
                "title": title,
                "playing": file,

Modified channels/shoutcast.py from [834f2c6518] to [2a36c113c8].

Before (lines 113-127):
               if (next < max):


                  #/radiolist.cfm?action=sub&string=&cat=Oldies&_cf_containerId=radiolist&_cf_nodebug=true&_cf_nocache=true&_cf_rc=0
                  #/radiolist.cfm?start=19&action=sub&string=&cat=Oldies&amount=18&order=listeners
                  # page
                  url = "http://www.shoutcast.com/radiolist.cfm?action=sub&string=&cat="+ucat+"&order=listeners&amount="+str(count)
                  __print__(dbg.HTTP, url)
                  referer = "http://www.shoutcast.com/?action=sub&cat="+ucat
                  params = {}
                  html = http.get(url, params=params, referer=referer, ajax=1)

                  #__print__(dbg.DATA, html)
                  #__print__(re.compile("id=(\d+)").findall(html));

After (lines 113-126):
               if (next < max):


                  #/radiolist.cfm?action=sub&string=&cat=Oldies&_cf_containerId=radiolist&_cf_nodebug=true&_cf_nocache=true&_cf_rc=0
                  #/radiolist.cfm?start=19&action=sub&string=&cat=Oldies&amount=18&order=listeners
                  # page
                  url = "http://www.shoutcast.com/radiolist.cfm?action=sub&string=&cat="+ucat+"&order=listeners&amount="+str(count)

                  referer = "http://www.shoutcast.com/?action=sub&cat="+ucat
                  params = {}
                  html = http.get(url, params=params, referer=referer, ajax=1)

                  #__print__(dbg.DATA, html)
                  #__print__(re.compile("id=(\d+)").findall(html));


Before (lines 196-217):
                               "format": self.mime_fmt(div.find("td:eq(5)").text()),
                               "max": 0,
                               "genre": cat,
                          })


                  # display partial results (not strictly needed anymore, because we fetch just one page)
                  self.parent.status()
                  self.update_streams_partially_done(entries)
                  
                  # more pages to load?
                  next = 99999
                     
            except Exception as e:
               __print__(dbg.ERR, e)
               return entries
            
            #fin
            __print__(dbg.DATA, entries)
            return entries


After (lines 195-215):
                               "format": self.mime_fmt(div.find("td:eq(5)").text()),
                               "max": 0,
                               "genre": cat,
                          })


                  # display partial results (not strictly needed anymore, because we fetch just one page)

                  self.update_streams_partially_done(entries)
                  
                  # more pages to load?
                  next = 99999
                     
            except Exception as e:
               __print__(dbg.ERR, e)
               return entries
            
            #fin
            #__print__(dbg.DATA, entries)
            return entries


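Taken together, the hunks in this check-in call self.parent.status() in three ways: with no argument (the bare call removed from shoutcast.py above), with a message string (the call removed from internet_radio_org_uk.py), and with a float progress fraction (the calls added in jamendo.py and, below, surfmusik.py). A minimal stand-in that accepts exactly those call styles; this is inferred from the call sites, not taken from the actual main-window code:

    class StatusBarStub:
        """Mimics the three status() call styles seen in this check-in."""
        def status(self, info=None):
            if info is None:
                print("[status] cleared")                         # bare call resets the bar
            elif isinstance(info, float):
                print("[status] progress %.0f%%" % (info * 100))  # fraction between 0.0 and 1.0
            else:
                print("[status] %s" % (info,))                    # plain text message

    # StatusBarStub().status("downloading category pages...")
    # StatusBarStub().status(0.4)
    # StatusBarStub().status()
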
Modified channels/surfmusik.py from [6db3b80654] to [2563179fb4].

Before (lines 121-133):
                    "genre": genre,
                    "format": ("video/html" if is_tv else "audio/mpeg"),
                })

                # limit result list
                if i > max:
                   break


                i += 1
 
        # done    
        return entries


After (lines 121-135):
                    "genre": genre,
                    "format": ("video/html" if is_tv else "audio/mpeg"),
                })

                # limit result list
                if i > max:
                   break
                if i % 10 == 0:
                   self.parent.status(float(i)/float(max+5))
                i += 1
 
        # done    
        return entries


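The surfmusik.py hunk throttles its progress reporting: instead of touching the statusbar for every parsed row, it only pushes an update for every tenth one. A sketch of that pattern with illustrative names (the limit and the +5 headroom mirror the max+5 expression above):

    def parse_with_progress(rows, report, limit, every=10):
        """Collect up to `limit` rows, updating the progress display every `every` items."""
        entries = []
        for i, row in enumerate(rows, start=1):
            entries.append(row)
            if i > limit:
                break
            if i % every == 0:
                report(float(i) / float(limit + 5))   # same +5 headroom as in the hunk
        return entries
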
Modified channels/xiph.py from [b7c1ca97af] to [921a2a9392].

Before (lines 100-114):
            #__print__(dbg.DATA, data)
            
            #-- extract
            l = []
            __print__( dbg.PROC, "processing api.dir.xiph.org JSON (via api.include-once.org cache)" )
            data = json.loads(data)
            for e in data.values():
                __print__(dbg.DATA, e)
                bitrate = int(e["bitrate"])
                if conf.xiph_min_bitrate and bitrate and bitrate >= int(conf.xiph_min_bitrate):
                  l.append({
                    "title": e["stream_name"],
                    "url": e["listen_url"],
                    "format": e["type"],
                    "bitrate": int(e["bitrate"]),

After (lines 100-114):
            #__print__(dbg.DATA, data)
            
            #-- extract
            l = []
            __print__( dbg.PROC, "processing api.dir.xiph.org JSON (via api.include-once.org cache)" )
            data = json.loads(data)
            for e in data.values():
                #__print__(dbg.DATA, e)
                bitrate = int(e["bitrate"])
                if conf.xiph_min_bitrate and bitrate and bitrate >= int(conf.xiph_min_bitrate):
                  l.append({
                    "title": e["stream_name"],
                    "url": e["listen_url"],
                    "format": e["type"],
                    "bitrate": int(e["bitrate"]),

Modified channels/youtube.py from [068cf22437] to [26bc9c21d4].

Before (lines 313-332):
            data["description"] = row["snippet"]["description"],

        return data


    # API version 2.0s jsonified XML needs different unpacking:
    def wrap2(self, row):
        __print__(dbg.DATA, row)
        return dict(
            genre = row["category"][1]["term"],
            title = row["title"]["$t"],
            playing = row["author"][0]["name"]["$t"],
            format = self.fmt,
            url = row["content"]["src"].split("?")[0],
            homepage = row["media$group"]["media$player"]["url"],
            image = row["media$group"]["media$thumbnail"][0]["url"],
        )



After (lines 313-332):
            data["description"] = row["snippet"]["description"],

        return data


    # API version 2.0s jsonified XML needs different unpacking:
    def wrap2(self, row):
        #__print__(dbg.DATA, row)
        return dict(
            genre = row["category"][1]["term"],
            title = row["title"]["$t"],
            playing = row["author"][0]["name"]["$t"],
            format = self.fmt,
            url = row["content"]["src"].split("?")[0],
            homepage = row["media$group"]["media$player"]["url"],
            image = row["media$group"]["media$thumbnail"][0]["url"],
        )
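
For context, wrap2() above flattens the v2 feed's "jsonified XML" layout ($t text nodes, media$group containers) into the flat station dict used elsewhere in the channel. A minimal, made-up row showing only the keys wrap2() actually reads:

    row = {
        "category": [{}, {"term": "Music"}],
        "title": {"$t": "Some video"},
        "author": [{"name": {"$t": "someuser"}}],
        "content": {"src": "http://example.org/video.flv?extra=1"},
        "media$group": {
            "media$player": {"url": "http://example.org/watch"},
            "media$thumbnail": [{"url": "http://example.org/thumb.jpg"}],
        },
    }
    # wrap2(row) would then return roughly:
    # {"genre": "Music", "title": "Some video", "playing": "someuser",
    #  "format": self.fmt, "url": "http://example.org/video.flv",
    #  "homepage": "http://example.org/watch", "image": "http://example.org/thumb.jpg"}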