Internet radio browser GUI for music/video streams from various directory services.

branch: streamtuner2


Check-in [ebae9e51ac]

Overview
Comment: Replace old __print__/dbg.XYZ references with new log.XYZ() wrapper.
Downloads: Tarball | ZIP archive | SQL archive
Timelines: family | ancestors | descendants | both | trunk
Files: files | file ages | folders
SHA1: ebae9e51ace587c776685fc0416db7a23b821afd
User & Date: mario on 2015-04-21 06:05:56
Other Links: manifest | tags
Context
2015-04-21
22:00
Fix missing .module property for add_plugin_defaults. check-in: 266321da13 user: mario tags: trunk
06:05
Replace old __print__/dbg.XYZ references with new log.XYZ() wrapper. check-in: ebae9e51ac user: mario tags: trunk
2015-04-20
23:19
Fix explanation on why iCast fetching takes a little while for some categories. check-in: de5d9ad092 user: mario tags: trunk
Changes
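The check-in comment above describes the pattern applied to every file listed below: positional __print__(dbg.XYZ, ...) calls become attribute-style log.XYZ(...) calls. The log wrapper itself lives in config.py, which is not part of this diff; the following is only an illustrative sketch (not the real implementation), assuming the wrapper merely prefixes its output with the category name:

# Hedged sketch of such a log wrapper -- not the actual config.py code.
# Any attribute access (log.ERR, log.HTTP, log.PROC, ...) yields a callable
# that prints its arguments prefixed with the category name.
from __future__ import print_function

class _Log(object):
    def __getattr__(self, category):
        def emit(*args):
            print(category + ":", *args)
        return emit

log = _Log()

# old call style:  __print__(dbg.ERR, "Command not found:", cmd)
# new call style:  log.ERR("Command not found:", cmd)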

Modified action.py from [d87b406b97] to [b04a438de7].

Old (lines 27-41):
# And finally this module is also used by exporting and (perhaps
# in the future) playlist importing features.


import re
import os
from ahttp import fix_url as http_fix_url, session
from config import conf, __print__ as debug, dbg
import platform
import copy
import json
from datetime import datetime
from xml.sax.saxutils import escape as xmlentities, unescape as xmlunescape

New (lines 27-41):
# And finally this module is also used by exporting and (perhaps
# in the future) playlist importing features.


import re
import os
from ahttp import fix_url as http_fix_url, session
from config import *
import platform
import copy
import json
from datetime import datetime
from xml.sax.saxutils import escape as xmlentities, unescape as xmlunescape


Old (lines 109-125):
]



# Exec wrapper
#
def run(cmd):
    debug(dbg.PROC, "Exec:", cmd)
    try:    os.system("start \"%s\"" % cmd if conf.windows else cmd + " &")
    except: debug(dbg.ERR, "Command not found:", cmd)


# Start web browser
#
def browser(url):
    bin = conf.play.get("url/http", "sensible-browser")
    print url

New (lines 109-125):
]



# Exec wrapper
#
def run(cmd):
    log.PROC("Exec:", cmd)
    try:    os.system("start \"%s\"" % cmd if conf.windows else cmd + " &")
    except: log.ERR("Command not found:", cmd)


# Start web browser
#
def browser(url):
    bin = conf.play.get("url/http", "sensible-browser")
    print url
Old (lines 205-257):
# Substitute .pls URL with local .m3u, or direct srv addresses, or leaves URL asis.
#  · Takes a single input `url` (and original row{} as template).
#  · But returns a list of [urls] after playlist extraction.
#  · If repackaging as .m3u/.pls/.xspf, returns the local [fn].
#
def convert_playlist(url, source, dest, local_file=True, row={}):
    urls = []
    debug(dbg.PROC, "convert_playlist(", url, source, dest, ")")

    # Leave alone if format matches, or if already "srv" URL, or if not http (local path, mms:/rtsp:)
    if source == dest or source in ("srv", "href") or not re.match("(https?|spdy)://", url):
        return [url]
    
    # Retrieve from URL
    (mime, cnt) = http_probe_get(url)
    
    # Leave streaming server as is
    if mime == "srv":
        cnt = ""
        return [url]

    # Deduce likely content format
    ext = probe_playlist_fn_ext(url)
    probe = probe_playlist_content(cnt)

    # Check ambiguity (except pseudo extension)
    if len(set([source, mime, probe])) > 1:
        debug(dbg.ERR, "Possible playlist format mismatch:", "listformat={}, http_mime={}, rx_probe={}, ext={}".format(source, mime, probe, ext))

    # Extract URLs from content
    for fmt in [id[0] for id in extract_playlist.extr_urls]:
        if not urls and fmt in (source, mime, probe, ext, "raw"):
            urls = extract_playlist(cnt).format(fmt)
            debug(dbg.DATA, "conversion from:", source, " with extractor:", fmt, "got URLs=", urls)
            
    # Return original, or asis for srv targets
    if not urls:
        return [url]
    elif dest in ("srv", "href"):
        return urls

    # Otherwise convert to local file
    if local_file:
        fn, is_unique = tmp_fn(cnt, dest)
        with open(fn, "w") as f:
            debug(dbg.DATA, "exporting with format:", dest, " into filename:", fn)
            f.write( save_playlist(source="srv", multiply=True).export(urls, row, dest) )
        return [fn]
    else:
        return urls


# Test URL/path "extension" for ".pls" / ".m3u" etc.

New (lines 205-257):
# Substitute .pls URL with local .m3u, or direct srv addresses, or leaves URL asis.
#  · Takes a single input `url` (and original row{} as template).
#  · But returns a list of [urls] after playlist extraction.
#  · If repackaging as .m3u/.pls/.xspf, returns the local [fn].
#
def convert_playlist(url, source, dest, local_file=True, row={}):
    urls = []
    log.PROC("convert_playlist(", url, source, dest, ")")

    # Leave alone if format matches, or if already "srv" URL, or if not http (local path, mms:/rtsp:)
    if source == dest or source in ("srv", "href") or not re.match("(https?|spdy)://", url):
        return [url]
    
    # Retrieve from URL
    (mime, cnt) = http_probe_get(url)
    
    # Leave streaming server as is
    if mime == "srv":
        cnt = ""
        return [url]

    # Deduce likely content format
    ext = probe_playlist_fn_ext(url)
    probe = probe_playlist_content(cnt)

    # Check ambiguity (except pseudo extension)
    if len(set([source, mime, probe])) > 1:
        log.ERR("Possible playlist format mismatch:", "listformat={}, http_mime={}, rx_probe={}, ext={}".format(source, mime, probe, ext))

    # Extract URLs from content
    for fmt in [id[0] for id in extract_playlist.extr_urls]:
        if not urls and fmt in (source, mime, probe, ext, "raw"):
            urls = extract_playlist(cnt).format(fmt)
            log.DATA("conversion from:", source, " with extractor:", fmt, "got URLs=", urls)
            
    # Return original, or asis for srv targets
    if not urls:
        return [url]
    elif dest in ("srv", "href"):
        return urls

    # Otherwise convert to local file
    if local_file:
        fn, is_unique = tmp_fn(cnt, dest)
        with open(fn, "w") as f:
            log.DATA("exporting with format:", dest, " into filename:", fn)
            f.write( save_playlist(source="srv", multiply=True).export(urls, row, dest) )
        return [fn]
    else:
        return urls


# Test URL/path "extension" for ".pls" / ".m3u" etc.
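For orientation, a hedged usage sketch of the convert_playlist() contract documented above; the URL, row contents and resulting path are placeholders, not data from this repository:

import action

# A remote .pls playlist is fetched, its stream URLs extracted, and the result
# repackaged as a local .m3u file; a list with the generated filename is returned.
local_files = action.convert_playlist(
    "http://example.org/station.pls",          # hypothetical playlist URL
    source="pls", dest="m3u",
    local_file=True, row={"title": "Example station"})
# With dest="srv"/"href" (or local_file=False) the extracted stream URLs are
# returned directly instead of a temporary playlist file.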
Old (lines 285-299):
    mime = r.headers.get("content-type", "href")
    mime = mime.split(";")[0].strip()
    # Map MIME to abbr type (pls, m3u, xspf)
    if listfmt_t.get(mime):
        mime = listfmt_t.get(mime)
    # Raw content (mp3, flv)
    elif mediafmt_t.get(mime):
        debug(dbg.ERR, "Got media MIME type for expected playlist", mime, " on url=", url)
        mime = mediafmt_t.get(mime)
        return (mime, url)
    # Rejoin body
    content = "\n".join(str.decode(errors='replace') for str in r.iter_lines())
    return (mime, content)

New (lines 285-299):
    mime = r.headers.get("content-type", "href")
    mime = mime.split(";")[0].strip()
    # Map MIME to abbr type (pls, m3u, xspf)
    if listfmt_t.get(mime):
        mime = listfmt_t.get(mime)
    # Raw content (mp3, flv)
    elif mediafmt_t.get(mime):
        log.ERR("Got media MIME type for expected playlist", mime, " on url=", url)
        mime = mediafmt_t.get(mime)
        return (mime, url)
    # Rejoin body
    content = "\n".join(str.decode(errors='replace') for str in r.iter_lines())
    return (mime, content)


Old (lines 309-323):
    # Content of playlist file
    src = ""
    def __init__(self, text):
        self.src = text
        
    # Extract only URLs from given source type
    def format(self, fmt):
        debug(dbg.DATA, "input extractor/regex:", fmt, len(self.src))

        # find extractor
        if fmt in dir(self):
            return self.__dict__[fmt]()

        # regex scheme
        rx, decode = dict(self.extr_urls)[fmt]

New (lines 309-323):
    # Content of playlist file
    src = ""
    def __init__(self, text):
        self.src = text
        
    # Extract only URLs from given source type
    def format(self, fmt):
        log.DATA("input extractor/regex:", fmt, len(self.src))

        # find extractor
        if fmt in dir(self):
            return self.__dict__[fmt]()

        # regex scheme
        rx, decode = dict(self.extr_urls)[fmt]
Old (lines 409-423):
                    row["url"] = url
                    new_rows.append(row)
                    # Or just allow one stream per station in a playlist entry
                    if not self.multiply:
                        break
            rows = new_rows

        debug(dbg.DATA, "conversion to:", dest, "  with rows=", rows)

        # call conversion schemes
        converter = getattr(self, dest) or self.pls
        return converter(rows)

    # save directly
    def file(self, rows, dest, fn):

New (lines 409-423):
                    row["url"] = url
                    new_rows.append(row)
                    # Or just allow one stream per station in a playlist entry
                    if not self.multiply:
                        break
            rows = new_rows

        log.DATA("conversion to:", dest, "  with rows=", rows)

        # call conversion schemes
        converter = getattr(self, dest) or self.pls
        return converter(rows)

    # save directly
    def file(self, rows, dest, fn):

Modified ahttp.py from [46b2bffbda] to [1c2ab28748].

Old (lines 10-24):
#
# Provides a http "GET" method, but also does POST and AJAX-
# simulating requests too. Hooks into mains gtk.statusbar().
# And can normalize URLs to always carry a trailing slash
# after the domain name.


from config import conf, __print__, dbg
import requests


#-- hooks to progress meter and status bar in main window
feedback = None

# Sets either text or percentage of main windows' status bar.

New (lines 10-24):
#
# Provides a http "GET" method, but also does POST and AJAX-
# simulating requests too. Hooks into mains gtk.statusbar().
# And can normalize URLs to always carry a trailing slash
# after the domain name.


from config import *
import requests


#-- hooks to progress meter and status bar in main window
feedback = None

# Sets either text or percentage of main windows' status bar.
Old (lines 50-64):


#-- Retrieve data via HTTP
#
#  Well, it says "get", but it actually does POST and AJAXish GET requests too.
#
def get(url, params={}, referer="", post=0, ajax=0, binary=0, feedback=None, content=True):
    __print__( dbg.HTTP, "GET", url, params )

    # statusbar info
    progress_feedback(url)
    
    # combine headers
    headers = {}
    if ajax:

New (lines 50-64):


#-- Retrieve data via HTTP
#
#  Well, it says "get", but it actually does POST and AJAXish GET requests too.
#
def get(url, params={}, referer="", post=0, ajax=0, binary=0, feedback=None, content=True):
    log.HTTP("GET", url, params )

    # statusbar info
    progress_feedback(url)
    
    # combine headers
    headers = {}
    if ajax:
Old (lines 72-94):
    
    # read
    if post:
        r = session.post(url, params=params, headers=headers, timeout=7.5)
    else:    
        r = session.get(url, params=params, headers=headers, timeout=9.75)

    __print__( dbg.HTTP, r.request.headers );
    __print__( dbg.HTTP, r.headers );
            
    # finish, clean statusbar
    #progress_feedback(0.9)
    #progress_feedback("")

    # result
    __print__( dbg.INFO, "Content-Length", len(r.content) )
    if not content:
        return r
    elif binary:
        return r.content
    else:
        return r.text

New (lines 72-94):
    
    # read
    if post:
        r = session.post(url, params=params, headers=headers, timeout=7.5)
    else:    
        r = session.get(url, params=params, headers=headers, timeout=9.75)

    log.HTTP(r.request.headers );
    log.HTTP(r.headers );
            
    # finish, clean statusbar
    #progress_feedback(0.9)
    #progress_feedback("")

    # result
    log.INFO("Content-Length", len(r.content) )
    if not content:
        return r
    elif binary:
        return r.content
    else:
        return r.text
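
A hedged usage sketch of this get() helper as the channel modules call it; the endpoints and parameters are placeholders:

import ahttp as http

# plain GET, returns the response body as text
html = http.get("http://example.org/stations", params={"genre": "jazz"})

# POST with XMLHttpRequest-style headers, as some channel plugins use it
data = http.get("http://example.org/api", params={"q": "rock"}, post=1, ajax=1)

# binary=1 returns r.content (bytes), content=False returns the response object itself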

Modified channels/__init__.py from [f6644128e5] to [5cecf16359].

Old (lines 100-114):
    #-- keep track of currently selected genre/category
    __current = None
    @property
    def current(self):
        return self.__current
    @current.setter
    def current(self, newcat):
        __print__(dbg.PROC, "{}.current:={} ← from {}".format(self.module, newcat, [inspect.stack()[x][3] for x in range(1,4)]))
        self.__current = newcat
        return self.__current


    #--------------------------- initialization --------------------------------

New (lines 100-114):
    #-- keep track of currently selected genre/category
    __current = None
    @property
    def current(self):
        return self.__current
    @current.setter
    def current(self, newcat):
        log.PROC("{}.current:={} ← from {}".format(self.module, newcat, [inspect.stack()[x][3] for x in range(1,4)]))
        self.__current = newcat
        return self.__current


    #--------------------------- initialization --------------------------------


Old (lines 163-177):
        # add to main menu
        uikit.add_menu([parent.channelmenuitems], self.meta["title"], lambda w: parent.channel_switch_by_name(self.module) or 1)


    # Statusbar stub (defers to parent/main window, if in GUI mode)
    def status(self, *v):
        if self.parent: self.parent.status(*v)
        else: __print__(dbg.INFO, "status():", *v)


        
    #--------------------- streams/model data accesss ---------------------------
        
    # Get list of stations in current category
    def stations(self):

New (lines 163-177):
        # add to main menu
        uikit.add_menu([parent.channelmenuitems], self.meta["title"], lambda w: parent.channel_switch_by_name(self.module) or 1)


    # Statusbar stub (defers to parent/main window, if in GUI mode)
    def status(self, *v):
        if self.parent: self.parent.status(*v)
        else: log.INFO("status():", *v)


        
    #--------------------- streams/model data accesss ---------------------------
        
    # Get list of stations in current category
    def stations(self):
Old (lines 243-291):
        # called to early
        if not category:
            print "load(None)"
            return

        # get data from cache or download
        if force or not category in self.streams:
            __print__(dbg.PROC, "load", "update_streams")
            self.status("Updating streams...")
            self.status(-0.1)
            if category == "empty":
                new_streams = self.empty_stub
            else:
                new_streams = self.update_streams(category)
  
            if new_streams:
                # check and modify entry;
                # assert that title and url are present
                modified = []
                for row in new_streams:
                    if len(set(["", None]) & set([row.get("title"), row.get("url")])):
                        continue
                    try:
                        modified.append( self.postprocess(row) )
                    except Exception as e:
                        __print__(e, dbg.DATA, "Missing title or url. Postprocessing failed:", row)
                new_streams = modified
  
                # don't lose forgotten streams
                if conf.retain_deleted:
                   self.streams[category] = new_streams + self.deleted_streams(new_streams, self.streams.get(category,[]))
                else:
                   self.streams[category] = new_streams
  
                # save in cache
                self.save()
  
            else:
                # parse error
                self.status("Category parsed empty.")
                self.streams[category] = self.nothing_found
                __print__(dbg.INFO, "Oooops, parser returned nothing for category " + category)
                
        # assign to treeview model
        uikit.do(lambda:uikit.columns(self.gtk_list, self.datamap, self.prepare(self.streams[category])))

        # set pointer
        self.current = category
        self.status("")

New (lines 243-291):
        # called to early
        if not category:
            print "load(None)"
            return

        # get data from cache or download
        if force or not category in self.streams:
            log.PROC("load", "update_streams")
            self.status("Updating streams...")
            self.status(-0.1)
            if category == "empty":
                new_streams = self.empty_stub
            else:
                new_streams = self.update_streams(category)
  
            if new_streams:
                # check and modify entry;
                # assert that title and url are present
                modified = []
                for row in new_streams:
                    if len(set(["", None]) & set([row.get("title"), row.get("url")])):
                        continue
                    try:
                        modified.append( self.postprocess(row) )
                    except Exception as e:
                        log.DATA(e, "Missing title or url. Postprocessing failed:", row)
                new_streams = modified
  
                # don't lose forgotten streams
                if conf.retain_deleted:
                   self.streams[category] = new_streams + self.deleted_streams(new_streams, self.streams.get(category,[]))
                else:
                   self.streams[category] = new_streams
  
                # save in cache
                self.save()
  
            else:
                # parse error
                self.status("Category parsed empty.")
                self.streams[category] = self.nothing_found
                log.INFO("Oooops, parser returned nothing for category " + category)
                
        # assign to treeview model
        uikit.do(lambda:uikit.columns(self.gtk_list, self.datamap, self.prepare(self.streams[category])))

        # set pointer
        self.current = category
        self.status("")
Old (lines 379-417):
        
    # display .current category, once notebook/channel tab is first opened
    def first_show(self):

        # Already processed
        if (self.shown == 55555):
            return
        __print__(dbg.PROC, self.module, "→ first_show()", ", current=", self.current, ", categories=", len(self.categories))
    
        # if category tree is empty, initialize it
        if not self.categories:
            __print__(dbg.PROC, self.module, "→ first_show() → reload_categories()");
            try:
                self.reload_categories()
            except:
                __print__(dbg.ERR, "HTTP error or extraction failure.")
                self.categories = ["empty"]
            self.display_categories()

        # Select first category
        if not self.current:
            self.current = self.str_from_struct(self.categories) or None
            __print__(dbg.STAT, self.module, "→ first_show(); use first category as current =", self.current)
            self.shown = 0,

        # Show current category in any case
        __print__(dbg.UI, self.module, "→ first_show(); station list → load(", self.current, ")")
        uikit.do(self.load, self.current)
    
        # put selection/cursor on last position
        if True:#self.shown != None:
            __print__(dbg.STAT, self.module+".first_show()", "select last known category treelist position =", self.shown)
            try:
                uikit.do(lambda:self.gtk_list.get_selection().select_path(self.shown))
            except:
                pass
            
        # Invoke only once
        self.shown = 55555

New (lines 379-417):
        
    # display .current category, once notebook/channel tab is first opened
    def first_show(self):

        # Already processed
        if (self.shown == 55555):
            return
        log.PROC(self.module, "→ first_show()", ", current=", self.current, ", categories=", len(self.categories))
    
        # if category tree is empty, initialize it
        if not self.categories:
            log.PROC(self.module, "→ first_show() → reload_categories()");
            try:
                self.reload_categories()
            except:
                log.ERR("HTTP error or extraction failure.")
                self.categories = ["empty"]
            self.display_categories()

        # Select first category
        if not self.current:
            self.current = self.str_from_struct(self.categories) or None
            log.STAT(self.module, "→ first_show(); use first category as current =", self.current)
            self.shown = 0,

        # Show current category in any case
        log.UI(self.module, "→ first_show(); station list → load(", self.current, ")")
        uikit.do(self.load, self.current)
    
        # put selection/cursor on last position
        if True:#self.shown != None:
            log.STAT(self.module+".first_show()", "select last known category treelist position =", self.shown)
            try:
                uikit.do(lambda:self.gtk_list.get_selection().select_path(self.shown))
            except:
                pass
            
        # Invoke only once
        self.shown = 55555

Modified channels/bookmarks.py from [baf2fe6dd0] to [44aa6ec97b].

Old (lines 71-85):


    # all entries just come from "bookmarks.json"
    def cache(self):
        # stream list
        cache = conf.load(self.module)
        if (cache):
            __print__(dbg.PROC, "load bookmarks.json")
            self.streams = cache
        


    # save to cache file
    def save(self):
        conf.save(self.module, self.streams, nice=1)

New (lines 71-85):


    # all entries just come from "bookmarks.json"
    def cache(self):
        # stream list
        cache = conf.load(self.module)
        if (cache):
            log.PROC("load bookmarks.json")
            self.streams = cache
        


    # save to cache file
    def save(self):
        conf.save(self.module, self.streams, nice=1)
Old (lines 136-150):
        return self.currentcat()
        
        
    # update bookmarks from freshly loaded streams data
    def heuristic_update(self, updated_channel, updated_category):

        if not conf.heuristic_bookmark_update: return
        __print__(dbg.PROC, "heuristic bookmark update")
        save = 0
        fav = self.streams["favourite"]

        # First we'll generate a list of current bookmark stream urls, and then
        # remove all but those from the currently UPDATED_channel + category.
        # This step is most likely redundant, but prevents accidently re-rewriting
        # stations that are in two channels (=duplicates with different PLS urls).

New (lines 136-150):
        return self.currentcat()
        
        
    # update bookmarks from freshly loaded streams data
    def heuristic_update(self, updated_channel, updated_category):

        if not conf.heuristic_bookmark_update: return
        log.PROC("heuristic bookmark update")
        save = 0
        fav = self.streams["favourite"]

        # First we'll generate a list of current bookmark stream urls, and then
        # remove all but those from the currently UPDATED_channel + category.
        # This step is most likely redundant, but prevents accidently re-rewriting
        # stations that are in two channels (=duplicates with different PLS urls).

Modified channels/configwin.py from [913bf48a5c] to [fa4ff7a844].

Old (lines 63-77):
                    w.set_default(val)
                # list
                elif type(w) is gtk.ListStore:
                    w.clear()
                    for k,v in val.items():
                        w.append([k, v, True, self.app_bin_check(v)])
                    w.append(["", "", True, gtk.STOCK_NEW])
            __print__(dbg.CONF, "config load", prefix+key, val, type(w))

    # Store gtk widget valus back into conf. dict
    def save_config(self, config, prefix="config_", save=0):
        for key,val in config.items():
            w = self.main.get_widget(prefix + key)
            if w:
                # text

New (lines 63-77):
                    w.set_default(val)
                # list
                elif type(w) is gtk.ListStore:
                    w.clear()
                    for k,v in val.items():
                        w.append([k, v, True, self.app_bin_check(v)])
                    w.append(["", "", True, gtk.STOCK_NEW])
            #log.CONF("config load", prefix+key, val, type(w))

    # Store gtk widget valus back into conf. dict
    def save_config(self, config, prefix="config_", save=0):
        for key,val in config.items():
            w = self.main.get_widget(prefix + key)
            if w:
                # text
Old (lines 85-99):
                    config[key] = w.get_active()
                # dict
                elif type(w) is gtk.ListStore:
                    config[key] = {}
                    for row in w:
                        if row[0] and row[1]:
                            config[key][row[0]] = row[1]
            __print__(dbg.CONF, "config save", prefix+key, val)
    
    
    # Generic Gtk callback to update ListStore when entries get edited.
    # (The main signal_connect() dict prepares individual lambda funcs
    # for each ListStore column id.)
    def list_edit(self, liststore, path, column, new_text):
        liststore[path][column] = new_text

New (lines 85-99):
                    config[key] = w.get_active()
                # dict
                elif type(w) is gtk.ListStore:
                    config[key] = {}
                    for row in w:
                        if row[0] and row[1]:
                            config[key][row[0]] = row[1]
            log.CONF("config save", prefix+key, val)
    
    
    # Generic Gtk callback to update ListStore when entries get edited.
    # (The main signal_connect() dict prepares individual lambda funcs
    # for each ListStore column id.)
    def list_edit(self, liststore, path, column, new_text):
        liststore[path][column] = new_text

Modified channels/dnd.py from [ef4879a2ca] to [fa727fbb20].

Old (lines 1-18):
# encoding: UTF-8
# api: streamtuner2
# title: Drag and Drop (experimental)
# description: Copy streams/stations from and to other applications.
# depends: uikit
# version: 0.5
# type: interface
# config:
#   { name: dnd_format, type: select, value: xspf, select: "pls|m3u|xspf|jspf|asx|smil", description: "Default temporary file format for copying a station entry." }
# category: ui
# priority: experimental

#
# Implements Gtk/X11 drag and drop support for station lists.
# Should allow to export either just stream URLs, or complete
# PLS, XSPF collections.
#
# Also used by the bookmarks channel to copy favourites around.
# Which perhaps should even be constrained to just the bookmarks

New (lines 1-19):
# encoding: UTF-8
# api: streamtuner2
# title: Drag and Drop (experimental)
# description: Copy streams/stations from and to other applications.
# depends: uikit
# version: 0.5
# type: interface
# config:
#   { name: dnd_format, type: select, value: xspf, select: "pls|m3u|xspf|jspf|asx|smil", description: "Default temporary file format for copying a station." }
# category: ui
# priority: default
# support: experimental
#
# Implements Gtk/X11 drag and drop support for station lists.
# Should allow to export either just stream URLs, or complete
# PLS, XSPF collections.
#
# Also used by the bookmarks channel to copy favourites around.
# Which perhaps should even be constrained to just the bookmarks

Modified channels/exportcat.py from [2dd3da6db4] to [fa4b19ec2a].

Old (lines 42-58):

    # Fetch streams from category, show "Save as" dialog, then convert URLs and export as playlist file
    def savewindow(self, *w):
        cn = self.parent.channel()
        source = cn.listformat
        streams = cn.streams[cn.current]
        fn = uikit.save_file("Export category", None, "%s.%s.%s" % (cn.module, cn.current, conf.export_format))
        __print__(dbg.PROC, "Exporting category to", fn)
        if fn:
            dest = re.findall("\.(m3u8?|pls|xspf|jspf|json|smil|asx)8?$", fn.lower())
            if dest:
                dest = dest[0]
            else:
                self.parent.status("Unsupported export playlist type (file extension).")
                return
            action.save_playlist(source="asis", multiply=False).file(rows=streams, fn=fn, dest=dest)
        pass            

New (lines 42-58):

    # Fetch streams from category, show "Save as" dialog, then convert URLs and export as playlist file
    def savewindow(self, *w):
        cn = self.parent.channel()
        source = cn.listformat
        streams = cn.streams[cn.current]
        fn = uikit.save_file("Export category", None, "%s.%s.%s" % (cn.module, cn.current, conf.export_format))
        log.PROC("Exporting category to", fn)
        if fn:
            dest = re.findall("\.(m3u8?|pls|xspf|jspf|json|smil|asx)8?$", fn.lower())
            if dest:
                dest = dest[0]
            else:
                self.parent.status("Unsupported export playlist type (file extension).")
                return
            action.save_playlist(source="asis", multiply=False).file(rows=streams, fn=fn, dest=dest)
        pass            

Modified channels/file.py from [62f947846d] to [710b8d2e10].

Old (lines 23-40):

# ID3 libraries
try:
    from mutagen import File as get_meta
except:
    try:
        from ID3 import ID3
        __print__(dbg.INFO, "Just basic ID3 support")
        get_meta = lambda fn: dict([(k.lower(),v) for k,v in ID3(fn).iteritems()])
    except:
        __print__(dbg.INIT, "You are out of luck in regards to mp3 browsing. No ID3 support.")
        get_meta = lambda *x: {}


# work around mutagens difficult interface
def mutagen_postprocess(d):
    if d.get("TIT2"):
        return {

New (lines 23-40):

# ID3 libraries
try:
    from mutagen import File as get_meta
except:
    try:
        from ID3 import ID3
        log.INFO("Just basic ID3 support")
        get_meta = lambda fn: dict([(k.lower(),v) for k,v in ID3(fn).iteritems()])
    except:
        log.INIT("You are out of luck in regards to mp3 browsing. No ID3 support.")
        get_meta = lambda *x: {}


# work around mutagens difficult interface
def mutagen_postprocess(d):
    if d.get("TIT2"):
        return {

Modified channels/global_key.py from [d213cc695c] to [9906a49477].

Old (lines 39-53):
    def __init__(self, parent):
        self.parent = parent
        conf.add_plugin_defaults(self.meta, self.module)
        try:
            for i,keyname in enumerate(conf.switch_key.split(",")):    # allow multiple keys
                keybinder.bind(keyname, self.switch, ((-1 if i else +1)))   # forward +1 or backward -1
        except:
            __print__(dbg.ERR, "plugin global_key: Key `%s` could not be registered" % conf.switch_key)
    
        
    # key event
    def switch(self, num, *any):
        
        # bookmarks, favourite
        channel, cat = conf.switch_channel.split(":")

New (lines 39-53):
    def __init__(self, parent):
        self.parent = parent
        conf.add_plugin_defaults(self.meta, self.module)
        try:
            for i,keyname in enumerate(conf.switch_key.split(",")):    # allow multiple keys
                keybinder.bind(keyname, self.switch, ((-1 if i else +1)))   # forward +1 or backward -1
        except:
            log.ERR("plugin global_key: Key `%s` could not be registered" % conf.switch_key)
    
        
    # key event
    def switch(self, num, *any):
        
        # bookmarks, favourite
        channel, cat = conf.switch_channel.split(":")

Modified channels/icast.py from [79c2eaaa83] to [84d1b6b174].

Old (lines 31-45):
# because each query result has only 10 entries.
# Which is why reloading takes a few seconds to
# collect 200 station entries (see main options).


import re
import json
from config import conf, dbg, __print__
from channels import *
import ahttp as http


# Surfmusik sharing site
class icast (ChannelPlugin):


New (lines 31-45):
# because each query result has only 10 entries.
# Which is why reloading takes a few seconds to
# collect 200 station entries (see main options).


import re
import json
from config import *
from channels import *
import ahttp as http


# Surfmusik sharing site
class icast (ChannelPlugin):

Old (lines 87-96):
            data = json.loads(http.get( self.base + method + path, params))
            r += data["stations"]
            if len(r) >= data["meta"]["total_count"] or len(data["stations"]) < 10:
                break
            else:
                params["page"] = int(data["meta"]["current_page"]) + 1
                self.parent.status(params["page"] * 9.5 / float(conf.max_streams))
            #__print__(dbg.DATA, data)
        return r

New (lines 87-96):
            data = json.loads(http.get( self.base + method + path, params))
            r += data["stations"]
            if len(r) >= data["meta"]["total_count"] or len(data["stations"]) < 10:
                break
            else:
                params["page"] = int(data["meta"]["current_page"]) + 1
                self.parent.status(params["page"] * 9.5 / float(conf.max_streams))
            #log.DATA(data)
        return r

Modified channels/internet_radio.py from [32acc0c3fa] to [69d9499a8c].

Old (lines 24-38):
# HTTP retrieval happens in one batch, determined by the number of pages
# setting, rather than the global max_streams option.
#


from channels import *
import re
from config import conf, __print__, dbg
import ahttp as http
from pq import pq




# streams and gui

New (lines 24-38):
# HTTP retrieval happens in one batch, determined by the number of pages
# setting, rather than the global max_streams option.
#


from channels import *
import re
from config import *
import ahttp as http
from pq import pq




# streams and gui
Old (lines 80-151):

            # Is there a next page?
            if str(page+1) not in rx_pages.findall(html[-1]):
                break
            self.parent.status(float(page)/float(max_pages+1))

        # Alternatively try regex or pyquery parsing
        #__print__(dbg.HTTP, html)
        for use_rx in [not conf.pyquery, conf.pyquery]:
            try:
                entries = (self.with_regex(html) if use_rx else self.with_dom(html))
                if len(entries):
                    break
            except Exception as e:
                __print__(dbg.ERR, e)
                continue
            
        # fin
        return entries


    # Regex extraction
    def with_regex(self, html):
        __print__(dbg.PROC, "internet-radio, regex")
        r = []
        html = "\n".join(html)
        
        # Break up into <tr> blocks before extracting bits
        rx_tr = re.compile("""<tr[^>]*>(.+?)</tr>""", re.S)
        rx_data = re.compile(r"""
               playjp',\s*'(https?://[^'">]+)
               .*?   <h4.*?>([^<>]+)</
               .*?   <b>([^<>]*)</b>
         (?:   .*?   href="(.*?)"        )?
         (?:   .*?   Genres:((?:</?a[^>]+>|\w+|\s+)+)    )?
               .*?   (\d+)\s*Listeners
               .*?   (\d+)\s*Kbps
        """, re.S|re.X)

        for div in rx_tr.findall(html):
            if div.find('id="pagination"') < 0:
                #__print__(dbg.DATA, len(div))
                uu = rx_data.search(div)
                if uu:
                    (url, title, playing, homepage, genres, listeners, bitrate) = uu.groups()
                    
                    # transform data
                    r.append({
                        "url": url,
                        "genre": self.strip_tags(genres or ""),
                        "homepage": http.fix_url(homepage or ""),
                        "title": (title or "").strip().replace("\n", " "),
                        "playing": (playing or "").strip().replace("\n", " "),
                        "bitrate": int(bitrate or 0),
                        "listeners": int(listeners or 0),
                        "format": "audio/mpeg", # there is no stream info on that, but internet-radio.org.uk doesn't seem very ogg-friendly anyway, so we assume the default here
                    })
                else:
                    __print__(dbg.DATA, "Regex couldn't decipher entry:", div)
        return r


    # DOM traversing
    def with_dom(self, html_list):
        __print__(dbg.PROC, "internet-radio, dom")
        rx_numbers = re.compile("(\d+)")
        r = []
        for html in html_list:
            # the streams are arranged in table rows
            doc = pq(html)
            for dir in (pq(e) for e in doc("tr")):
                

New (lines 80-151):

            # Is there a next page?
            if str(page+1) not in rx_pages.findall(html[-1]):
                break
            self.parent.status(float(page)/float(max_pages+1))

        # Alternatively try regex or pyquery parsing
        #log.HTTP(html)
        for use_rx in [not conf.pyquery, conf.pyquery]:
            try:
                entries = (self.with_regex(html) if use_rx else self.with_dom(html))
                if len(entries):
                    break
            except Exception as e:
                log.ERR(e)
                continue
            
        # fin
        return entries


    # Regex extraction
    def with_regex(self, html):
        log.PROC("internet-radio, regex")
        r = []
        html = "\n".join(html)
        
        # Break up into <tr> blocks before extracting bits
        rx_tr = re.compile("""<tr[^>]*>(.+?)</tr>""", re.S)
        rx_data = re.compile(r"""
               playjp',\s*'(https?://[^'">]+)
               .*?   <h4.*?>([^<>]+)</
               .*?   <b>([^<>]*)</b>
         (?:   .*?   href="(.*?)"        )?
         (?:   .*?   Genres:((?:</?a[^>]+>|\w+|\s+)+)    )?
               .*?   (\d+)\s*Listeners
               .*?   (\d+)\s*Kbps
        """, re.S|re.X)

        for div in rx_tr.findall(html):
            if div.find('id="pagination"') < 0:
                #log.DATA(len(div))
                uu = rx_data.search(div)
                if uu:
                    (url, title, playing, homepage, genres, listeners, bitrate) = uu.groups()
                    
                    # transform data
                    r.append({
                        "url": url,
                        "genre": self.strip_tags(genres or ""),
                        "homepage": http.fix_url(homepage or ""),
                        "title": (title or "").strip().replace("\n", " "),
                        "playing": (playing or "").strip().replace("\n", " "),
                        "bitrate": int(bitrate or 0),
                        "listeners": int(listeners or 0),
                        "format": "audio/mpeg", # there is no stream info on that, but internet-radio.org.uk doesn't seem very ogg-friendly anyway, so we assume the default here
                    })
                else:
                    log.DATA("Regex couldn't decipher entry:", div)
        return r


    # DOM traversing
    def with_dom(self, html_list):
        log.PROC("internet-radio, dom")
        rx_numbers = re.compile("(\d+)")
        r = []
        for html in html_list:
            # the streams are arranged in table rows
            doc = pq(html)
            for dir in (pq(e) for e in doc("tr")):
                

Modified channels/itunes.py from [4b51b14809] to [43771b8835].

Old (lines 28-42):
#  → AccuRadio
#  → BBC
#
# In this module only iTunes will be queried for now.
#

import re
from config import conf, dbg, __print__
from channels import *
import ahttp as http


# Surfmusik sharing site
class itunes (ChannelPlugin):


New (lines 28-42):
#  → AccuRadio
#  → BBC
#
# In this module only iTunes will be queried for now.
#

import re
from config import *
from channels import *
import ahttp as http


# Surfmusik sharing site
class itunes (ChannelPlugin):

Old (lines 82-96):
        pass

    # Just copy over stream URLs and station titles
    def update_streams(self, cat):
    
        m3u = http.get(self.base, {"category": cat.lower()})
        if len(m3u) < 256:
            __print__(dbg.ERR, m3u)
        
        rx_m3u = re.compile(r"""
            ^File(\d+)\s*=\s*(http://[^\s]+)\s*$\s*
            ^Title\1\s*=\s*([^\r\n]+)\s*$\s*
        """, re.M|re.I|re.X)

        r = []

New (lines 82-96):
        pass

    # Just copy over stream URLs and station titles
    def update_streams(self, cat):
    
        m3u = http.get(self.base, {"category": cat.lower()})
        if len(m3u) < 256:
            log.ERR(m3u)
        
        rx_m3u = re.compile(r"""
            ^File(\d+)\s*=\s*(http://[^\s]+)\s*$\s*
            ^Title\1\s*=\s*([^\r\n]+)\s*$\s*
        """, re.M|re.I|re.X)

        r = []

Modified channels/jamendo.py from [92fc7a9153] to [43417ad008].

Old (lines 34-48):
# back directly. Playlists and albums now require a roundtrip over the action
# module to extract the JAMJson format into pls/m3u/xspf. (The previous v2 API
# retrieval is going to become inaccessible soon.)


import re
import ahttp as http
from config import conf, __print__, dbg
from channels import *
import json


# jamendo CC music sharing site
#
#

New (lines 34-48):
# back directly. Playlists and albums now require a roundtrip over the action
# module to extract the JAMJson format into pls/m3u/xspf. (The previous v2 API
# retrieval is going to become inaccessible soon.)


import re
import ahttp as http
from config import *
from channels import *
import json


# jamendo CC music sharing site
#
#

Modified channels/live365.py from [7936759236] to [43a955e299].

Old (lines 22-36):


# streamtuner2 modules
from config import conf
from uikit import uikit
import ahttp as http
from channels import *
from config import __print__, dbg
import action

# python modules
import re
import xml.dom.minidom
from xml.sax.saxutils import unescape as entity_decode, escape as xmlentities
import gtk

New (lines 22-36):


# streamtuner2 modules
from config import conf
from uikit import uikit
import ahttp as http
from channels import *
from config import *
import action

# python modules
import re
import xml.dom.minidom
from xml.sax.saxutils import unescape as entity_decode, escape as xmlentities
import gtk

Modified channels/modarchive.py from [b5a34220a3] to [90763cbfb7].

Old (lines 25-39):
# setup wget/curl to download them.


import re
import ahttp as http
from config import conf
from channels import *
from config import __print__, dbg


# The MOD Archive
#
# Modarchive actually provides an API
# http://modarchive.org/index.php?xml-api
# (If only it wasn't XML based..)

New (lines 25-39):
# setup wget/curl to download them.


import re
import ahttp as http
from config import conf
from channels import *
from config import *


# The MOD Archive
#
# Modarchive actually provides an API
# http://modarchive.org/index.php?xml-api
# (If only it wasn't XML based..)
Old (lines 95-109):
            .*?    /formats/(\w+)\.png"
            .*?    title="([^">]+)">([^<>]+)</a>
            .*?    >(?:Rated|Unrated)</a>\s*(\d*)
        """, re.X|re.S)
        
        for uu in rx_mod.findall(html):
            (url, id, fmt, title, file, rating) = uu
            #__print__( dbg.DATA, uu )
            entries.append({
                "genre": cat,
                "url": url,
                "id": id,
                "format": self.mime_fmt(fmt) + "+zip",
                "title": title,
                "playing": file,

New (lines 95-109):
            .*?    /formats/(\w+)\.png"
            .*?    title="([^">]+)">([^<>]+)</a>
            .*?    >(?:Rated|Unrated)</a>\s*(\d*)
        """, re.X|re.S)
        
        for uu in rx_mod.findall(html):
            (url, id, fmt, title, file, rating) = uu
            #log.DATA( uu )
            entries.append({
                "genre": cat,
                "url": url,
                "id": id,
                "format": self.mime_fmt(fmt) + "+zip",
                "title": title,
                "playing": file,

Modified channels/punkcast.py from [946f14f97b] to [53641222c8].

Old (lines 24-38):


import re
import ahttp as http
from config import conf
import action
from channels import *
from config import __print__, dbg


# basic.ch broadcast archive
class punkcast (ChannelPlugin):

    # keeps category titles->urls    
    catmap = {}

New (lines 24-38):


import re
import ahttp as http
from config import conf
import action
from channels import *
from config import *


# basic.ch broadcast archive
class punkcast (ChannelPlugin):

    # keeps category titles->urls    
    catmap = {}
Old (lines 77-91):
    def play(self, row):
    
        rx_sound = re.compile("""(http://[^"<>]+[.](mp3|ogg|m3u|pls|ram))""")
        html = http.get(row["homepage"])
        
        # look up ANY audio url
        for uu in rx_sound.findall(html):
            __print__( dbg.DATA, uu )
            (url, fmt) = uu
            action.play(url, self.mime_fmt(fmt), "srv")
            return
        
        # or just open webpage
        action.browser(row["homepage"])

New (lines 77-91):
    def play(self, row):
    
        rx_sound = re.compile("""(http://[^"<>]+[.](mp3|ogg|m3u|pls|ram))""")
        html = http.get(row["homepage"])
        
        # look up ANY audio url
        for uu in rx_sound.findall(html):
            log.DATA( uu )
            (url, fmt) = uu
            action.play(url, self.mime_fmt(fmt), "srv")
            return
        
        # or just open webpage
        action.browser(row["homepage"])

Modified channels/radiobrowser.py from [eb4063d5ff] to [11cf6e90ac].

Old (lines 31-45):
# entries. Furthermore includes station homepage links!
#
# Also has a neat JSON API, so is quite easy to support.


import re
import json
from config import conf, dbg, __print__
from channels import *
import ahttp as http


# API endpoints:
# http://www.radio-browser.info/webservice/json/countries
# http://www.radio-browser.info/webservice/json/languages

New (lines 31-45):
# entries. Furthermore includes station homepage links!
#
# Also has a neat JSON API, so is quite easy to support.


import re
import json
from config import *
from channels import *
import ahttp as http


# API endpoints:
# http://www.radio-browser.info/webservice/json/countries
# http://www.radio-browser.info/webservice/json/languages

Modified channels/radiotray.py from [1ad37964fc] to [60345f8f8f].

Old (lines 99-113):
    # send to 
    def share(self, *w):
        row = self.parent.row()
        if row:
            # RadioTray doesn't have an addRadio method yet, so just fall back to play the stream URL
            try:
                group = self.map_group(row.get("genre"))
                __print__(dbg.PROC, "mapping genre '%s' to RT group '%s'" % (row["genre"], group))
                self.radiotray().addRadio(row["title"], row["url"], group)
            except:
                self.radiotray().playUrl(row["url"])
        pass

    # match genre to RT groups
    def map_group(self, genre):

New (lines 99-113):
    # send to 
    def share(self, *w):
        row = self.parent.row()
        if row:
            # RadioTray doesn't have an addRadio method yet, so just fall back to play the stream URL
            try:
                group = self.map_group(row.get("genre"))
                log.PROC("mapping genre '%s' to RT group '%s'" % (row["genre"], group))
                self.radiotray().addRadio(row["title"], row["url"], group)
            except:
                self.radiotray().playUrl(row["url"])
        pass

    # match genre to RT groups
    def map_group(self, genre):

Modified channels/search.py from [e665e21e39] to [14f30d1313].

Old (lines 98-112):
    # live search on directory server homepages
    def server_search(self, w):
        self.prepare_search()
        entries = []
        for i,cn in enumerate([self.main.channels[c] for c in self.targets]):
            if cn.has_search:  # "search" in cn.update_streams.func_code.co_varnames:
                self.main.status("Server searching: " + cn.module)
                __print__(dbg.PROC, "has_search:", cn.module)
                try:
                    add = cn.update_streams(cat=None, search=self.q)
                    for row in add:
                        row["genre"] = cn.meta["title"] + " " + row.get("genre", "")
                    entries += add
                except:
                    continue

New (lines 98-112):
    # live search on directory server homepages
    def server_search(self, w):
        self.prepare_search()
        entries = []
        for i,cn in enumerate([self.main.channels[c] for c in self.targets]):
            if cn.has_search:  # "search" in cn.update_streams.func_code.co_varnames:
                self.main.status("Server searching: " + cn.module)
                log.PROC("has_search:", cn.module)
                try:
                    add = cn.update_streams(cat=None, search=self.q)
                    for row in add:
                        row["genre"] = cn.meta["title"] + " " + row.get("genre", "")
                    entries += add
                except:
                    continue

Modified channels/shoutcast.py from [1fad5ce44e] to [7ab0770356].

Old (lines 22-38):
# It has been aquired by Radionomy in 2014, since then significant changes
# took place. The former yellow pages API got deprecated.


import ahttp as http
from json import loads as json_decode
import re
from config import conf, __print__, dbg

from pq import pq
from channels import *    # works everywhere but in this plugin(???!)
import channels
from compat2and3 import urllib



# SHOUTcast data module
#

New (lines 22-38):
# It has been aquired by Radionomy in 2014, since then significant changes
# took place. The former yellow pages API got deprecated.


import ahttp as http
from json import loads as json_decode
import re
from config import *
from channels import *
from pq import pq

import channels
from compat2and3 import urllib



# SHOUTcast data module
#
Old (lines 63-77):
    streams = {}
    
        
    # Extracts the category list from www.shoutcast.com,
    # stores a catmap (title => id)
    def update_categories(self):
        html = http.get(self.base_url)
        #__print__( dbg.DATA, html )
        self.categories = []
        
        # Genre list in sidebar
        """  <li><a id="genre-90" href="/Genre?name=Adult" onclick="loadStationsByGenre('Adult', 90, 89); return false;">Adult</a></li> """
        rx = re.compile(r"loadStationsByGenre\(  '([^']+)' [,\s]* (\d+) [,\s]* (\d+)  \)", re.X)
        subs = rx.findall(html)

New (lines 63-77):
    streams = {}
    
        
    # Extracts the category list from www.shoutcast.com,
    # stores a catmap (title => id)
    def update_categories(self):
        html = http.get(self.base_url)
        #log.DATA( html )
        self.categories = []
        
        # Genre list in sidebar
        """  <li><a id="genre-90" href="/Genre?name=Adult" onclick="loadStationsByGenre('Adult', 90, 89); return false;">Adult</a></li> """
        rx = re.compile(r"loadStationsByGenre\(  '([^']+)' [,\s]* (\d+) [,\s]* (\d+)  \)", re.X)
        subs = rx.findall(html)

Old (lines 90-130):
        pass
        

    # downloads stream list from shoutcast for given category
    def update_streams(self, cat):

        if (cat not in self.catmap):
            __print__( dbg.ERR, "Category not in known map.", cat )
            return []
        id = self.catmap[cat]

        # page
        url = "http://www.shoutcast.com/Home/BrowseByGenre"
        params = { "genrename": cat }
        referer = None
        try:
            json = http.get(url, params=params, referer=referer, post=1, ajax=1)
            json = json_decode(json)
        except:
            __print__(dbg.ERR, "HTTP request or JSON decoding failed. Outdated python/requests perhaps.")
            return []
        self.parent.status(0.75)

        # remap JSON
        entries = []
        for e in json:
            entries.append({
                "id": int(e.get("ID", 0)),
                "genre": str(e.get("Genre", "")),
                "title": str(e.get("Name", "")),
                "playing": str(e.get("CurrentTrack", "")),
                "bitrate": int(e.get("Bitrate", 0)),
                "listeners": int(e.get("Listeners", 0)),
                "url": "http://yp.shoutcast.com/sbin/tunein-station.pls?id=%s" % e.get("ID", "0"),
                "homepage": "",
                "format": "audio/mpeg"
            })

        #__print__(dbg.DATA, entries)
        return entries








|











|


















|


90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
        pass
        

    # downloads stream list from shoutcast for given category
    def update_streams(self, cat):

        if (cat not in self.catmap):
            log.ERR( "Category not in known map.", cat )
            return []
        id = self.catmap[cat]

        # page
        url = "http://www.shoutcast.com/Home/BrowseByGenre"
        params = { "genrename": cat }
        referer = None
        try:
            json = http.get(url, params=params, referer=referer, post=1, ajax=1)
            json = json_decode(json)
        except:
            log.ERR("HTTP request or JSON decoding failed. Outdated python/requests perhaps.")
            return []
        self.parent.status(0.75)

        # remap JSON
        entries = []
        for e in json:
            entries.append({
                "id": int(e.get("ID", 0)),
                "genre": str(e.get("Genre", "")),
                "title": str(e.get("Name", "")),
                "playing": str(e.get("CurrentTrack", "")),
                "bitrate": int(e.get("Bitrate", 0)),
                "listeners": int(e.get("Listeners", 0)),
                "url": "http://yp.shoutcast.com/sbin/tunein-station.pls?id=%s" % e.get("ID", "0"),
                "homepage": "",
                "format": "audio/mpeg"
            })

        #log.DATA(entries)
        return entries
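
For reference, the category regex quoted in update_categories() can be exercised directly on the sidebar markup from its docstring. How the plugin then folds the matches into self.catmap is not visible in this excerpt, so the dict comprehension below is only an assumed shape (title => numeric id).

    import re

    # Sidebar sample from the docstring above; re.X lets the regex keep
    # its decorative spaces without changing what it matches.
    html = """<li><a id="genre-90" href="/Genre?name=Adult"
      onclick="loadStationsByGenre('Adult', 90, 89); return false;">Adult</a></li>"""

    rx = re.compile(r"loadStationsByGenre\(  '([^']+)' [,\s]* (\d+) [,\s]* (\d+)  \)", re.X)
    subs = rx.findall(html)                                  # [('Adult', '90', '89')]
    catmap = {title: int(id) for title, id, parent in subs}  # assumed shape of self.catmap
    print(catmap)                                            # {'Adult': 90}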

Modified channels/somafm.py from [353f76d560] to [b4e377590b].

22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# only rewritten depending on bitrate configuration.
#
# Note that only 64 kbit/s AAC and 128 kbit/s MP3 are guaranteed
# to be available. Most stations offer different bitrates,
# but not all of them!


from config import conf, dbg, __print__
from channels import *
import re
import ahttp

# TuneIn radio directory
class somafm (ChannelPlugin):








|







22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# only rewritten depending on bitrate configuration.
#
# Note that only 64 kbit/s AAC and 128 kbit/s MP3 are guaranteed
# to be available. Most stations offer different bitrates,
# but not all of them!


from config import *
from channels import *
import re
import ahttp

# TuneIn radio directory
class somafm (ChannelPlugin):

Modified channels/surfmusik.py from [a963360d8b] to [c2116e521c].

30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
# be somewhat slow (for querying the actual mms:// streams).
#
#
#

import re
import ahttp as http
from config import conf, dbg, __print__
from channels import *



# Surfmusik sharing site
class surfmusik (ChannelPlugin):








|







30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
# be somewhat slow (for querying the actual mms:// streams).
#
#
#

import re
import ahttp as http
from config import *
from channels import *



# Surfmusik sharing site
class surfmusik (ChannelPlugin):

Modified channels/timer.py from [f6d2128cb5] to [d2257940ad].

72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
            ("timer_dialog", "delete-event"): self.hide,
        })
        
        # prepare spool
        self.sched = kronos.ThreadedScheduler()
        for row in self.streams:
            try: self.queue(row)
            except Exception as e: __print__(dbg.ERR, "queuing error", e)
        self.sched.start()


    # display GUI for setting timespec
    def edit_timer(self, *w):
        self.parent.timer_dialog.show()
        self.parent.timer_value.set_text("Fri,Sat 20:00-21:00 play")







|







72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
            ("timer_dialog", "delete-event"): self.hide,
        })
        
        # prepare spool
        self.sched = kronos.ThreadedScheduler()
        for row in self.streams:
            try: self.queue(row)
            except Exception as e: log.ERR("queuing error", e)
        self.sched.start()


    # display GUI for setting timespec
    def edit_timer(self, *w):
        self.parent.timer_dialog.show()
        self.parent.timer_value.set_text("Fri,Sat 20:00-21:00 play")
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
            activity, action_method = "record", self.record
        else:
            activity, action_method = "play", self.play
        
        # add
        task = self.sched.add_daytime_task(action_method, activity, days, None, time, kronos.method.threaded, [row], {})

        #__print__( "queue",  act, self.sched, (action_method, act, days, None, time, kronos.method.threaded, [row], {}), task.get_schedule_time(True) )
    
    
    
    # converts Mon,Tue,... into numbers 1-7
    def days(self, s):
        weekdays = ["su", "mo", "tu", "we", "th", "fr", "sa", "su"]
        r = []







|







132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
            activity, action_method = "record", self.record
        else:
            activity, action_method = "play", self.play
        
        # add
        task = self.sched.add_daytime_task(action_method, activity, days, None, time, kronos.method.threaded, [row], {})

        #log.QUEUE( act, self.sched, (action_method, act, days, None, time, kronos.method.threaded, [row], {}), task.get_schedule_time(True) )
    
    
    
    # converts Mon,Tue,... into numbers 1-7
    def days(self, s):
        weekdays = ["su", "mo", "tu", "we", "th", "fr", "sa", "su"]
        r = []
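
The body of days() is cut off in this excerpt; only the weekdays list is visible. The following is therefore just a plausible sketch of the "Mon,Tue,... into numbers 1-7" conversion the comment describes, reusing that list; the real implementation may differ.

    # index 1..7 of this list lines up with the "numbers 1-7" the comment mentions
    weekdays = ["su", "mo", "tu", "we", "th", "fr", "sa", "su"]

    def days(s):
        tokens = [t.strip().lower()[:2] for t in s.split(",") if t.strip()]
        return [i for i in range(1, 8) if weekdays[i] in tokens]

    print(days("Fri,Sat"))   # [5, 6]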

Modified channels/tunein.py from [9721a1e7b7] to [0bca890c80].

22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# heaps more talk and local show entries, etc. (Would require
# more deeply nested category tree.)
#


import re
import json
from config import conf, dbg, __print__
from channels import *
import ahttp as http
from xml.etree import ElementTree


# TuneIn radio directory
class tunein (ChannelPlugin):







|







22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# heaps more talk and local show entries, etc. (Would require
# more deeply nested category tree.)
#


import re
import json
from config import *
from channels import *
import ahttp as http
from xml.etree import ElementTree


# TuneIn radio directory
class tunein (ChannelPlugin):

Modified channels/ubuntuusers.py from [8e76324f70] to [e0b735eacf].

18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
# Short user-collected list of internet radio stations
# on UbuntuUsers.de wiki. Only provides a single category,
# but stations are grouped by country already.
#


import re
from config import conf, dbg, __print__
from channels import *
import ahttp


# UU Wiki radio list
class ubuntuusers (ChannelPlugin):








|







18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
# Short user-collected list of internet radio stations
# on UbuntuUsers.de wiki. Only provides a single category,
# but stations are grouped by country already.
#


import re
from config import *
from channels import *
import ahttp


# UU Wiki radio list
class ubuntuusers (ChannelPlugin):

Modified channels/useragentswitcher.py from [08fd23fe89] to [51e2b87f3f].

39
40
41
42
43
44
45
46
47
48
49
        parent.hooks["config_save"].append(self.apply)
        self.apply()    

    # set new browser string in requests session
    def apply(self):
        ua = self.map.get(conf.useragent.lower(), self.map["default"])
        if ua:
            __print__(dbg.HTTP, "UserAgentSwitcher:", ua)
            ahttp.session.headers.update({ "User-Agent": ua })









|



39
40
41
42
43
44
45
46
47
48
49
        parent.hooks["config_save"].append(self.apply)
        self.apply()    

    # set new browser string in requests session
    def apply(self):
        ua = self.map.get(conf.useragent.lower(), self.map["default"])
        if ua:
            log.HTTP("UserAgentSwitcher:", ua)
            ahttp.session.headers.update({ "User-Agent": ua })
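
ahttp.session appears to wrap a shared requests session (it exposes the same .headers.update() call), so the override above amounts to a plain header update. A standalone sketch with requests directly; the alias table below is invented for illustration and is not the plugin's actual self.map.

    import requests

    session = requests.Session()

    # hypothetical alias table standing in for the plugin's self.map
    ua_map = {
        "default": "",              # empty string keeps requests' default User-Agent
        "wget":    "Wget/1.20",
    }

    ua = ua_map.get("wget", ua_map["default"])
    if ua:
        session.headers.update({"User-Agent": ua})
    print(session.headers["User-Agent"])   # Wget/1.20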


Modified channels/xiph.py from [df3676cdef] to [c02f4fea70].

101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
      if cat:
          params["cat"] = cat.lower()
      if search:
          params["search"] = search
      
      #-- get data
      data = http.get(self.json_url, params=params)
      #__print__(dbg.DATA, data)
      
      #-- extract
      l = []
      __print__( dbg.PROC, "processing api.dir.xiph.org JSON (via api.include-once.org cache)" )
      data = json.loads(data)
      for e in data:
          #__print__(dbg.DATA, e)
          bitrate = int(e["bitrate"])
          if conf.xiph_min_bitrate and bitrate and bitrate >= int(conf.xiph_min_bitrate):
              if not len(l) or l[-1]["title"] != e["stream_name"]:
                  l.append({
                    "title": e["stream_name"],
                    "url": e["listen_url"],
                    "format": e["type"],







|



|


|







101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
      if cat:
          params["cat"] = cat.lower()
      if search:
          params["search"] = search
      
      #-- get data
      data = http.get(self.json_url, params=params)
      #log.DATA(data)
      
      #-- extract
      l = []
      log.PROC( "processing api.dir.xiph.org JSON (via api.include-once.org cache)" )
      data = json.loads(data)
      for e in data:
          #log.DATA(e)
          bitrate = int(e["bitrate"])
          if conf.xiph_min_bitrate and bitrate and bitrate >= int(conf.xiph_min_bitrate):
              if not len(l) or l[-1]["title"] != e["stream_name"]:
                  l.append({
                    "title": e["stream_name"],
                    "url": e["listen_url"],
                    "format": e["type"],

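The bitrate filter and duplicate collapsing shown above can be tried in isolation with a couple of fabricated Xiph-style rows; min_bitrate stands in for conf.xiph_min_bitrate here.

    min_bitrate = 64
    data = [
        {"stream_name": "Example One", "listen_url": "http://example.invalid/a", "type": "audio/mpeg", "bitrate": "128"},
        {"stream_name": "Example One", "listen_url": "http://example.invalid/b", "type": "audio/mpeg", "bitrate": "128"},
        {"stream_name": "Low Rate",    "listen_url": "http://example.invalid/c", "type": "audio/mpeg", "bitrate": "32"},
    ]

    l = []
    for e in data:
        bitrate = int(e["bitrate"])
        if min_bitrate and bitrate and bitrate >= min_bitrate:
            # consecutive duplicates (same station at another URL) are collapsed
            if not len(l) or l[-1]["title"] != e["stream_name"]:
                l.append({"title": e["stream_name"], "url": e["listen_url"], "format": e["type"]})

    print([x["title"] for x in l])   # ['Example One']
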
Modified channels/youtube.py from [0b836cb39e] to [16920f30fd].

239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
        # URL and default parameters
        (base_url, defaults) = self.service[ver]
        params = dict( list(defaults.items()) + list(params.items())  )

        # Retrieve data set
        while pages > 0:
            j = ahttp.get(base_url + method, params=params)
            #__print__(dbg.DATA, j)
            if j:
                # json decode
                data = json.loads(j)
                
                # extract items
                if "items" in data:
                    items += data["items"]







|







239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
        # URL and default parameters
        (base_url, defaults) = self.service[ver]
        params = dict( list(defaults.items()) + list(params.items())  )

        # Retrieve data set
        while pages > 0:
            j = ahttp.get(base_url + method, params=params)
            #log.DATA(j)
            if j:
                # json decode
                data = json.loads(j)
                
                # extract items
                if "items" in data:
                    items += data["items"]
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
            data["description"] = row["snippet"]["description"],

        return data


    # API version 2.0's jsonified XML needs different unpacking:
    def wrap2(self, row):
        #__print__(dbg.DATA, row)
        return dict(
            genre = row["category"][1]["term"],
            title = row["title"]["$t"],
            playing = row["author"][0]["name"]["$t"],
            format = self.audioformat,
            url = row["content"]["src"].split("?")[0],
            homepage = row["media$group"]["media$player"]["url"],
            image = row["media$group"]["media$thumbnail"][0]["url"],
        )










|












300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
            data["description"] = row["snippet"]["description"],

        return data


    # API version 2.0's jsonified XML needs different unpacking:
    def wrap2(self, row):
        #log.DATA(row)
        return dict(
            genre = row["category"][1]["term"],
            title = row["title"]["$t"],
            playing = row["author"][0]["name"]["$t"],
            format = self.audioformat,
            url = row["content"]["src"].split("?")[0],
            homepage = row["media$group"]["media$player"]["url"],
            image = row["media$group"]["media$thumbnail"][0]["url"],
        )
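
The "jsonified XML" rows that wrap2() unpacks look roughly like the sample below; the field names are taken from the indexing in the code above, while all values are invented.

    # fabricated sample row in the GData "$t" layout that wrap2() indexes into
    row = {
        "category": [{"term": "Film"}, {"term": "Music"}],
        "title":    {"$t": "Example clip"},
        "author":   [{"name": {"$t": "example-channel"}}],
        "content":  {"src": "http://example.invalid/videoplayback?signature=abc"},
        "media$group": {
            "media$player":    {"url": "http://example.invalid/watch?v=123"},
            "media$thumbnail": [{"url": "http://example.invalid/default.jpg"}],
        },
    }

    print(row["category"][1]["term"])            # genre -> Music
    print(row["title"]["$t"])                    # title -> Example clip
    print(row["content"]["src"].split("?")[0])   # url   -> http://example.invalid/videoplayback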



Modified config.py from [8f2ac16ca3] to [b88512db4c].

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
#
# encoding: UTF-8
# api: streamtuner2
# type: class
# title: global config object
# description: reads ~/.config/streamtuner/*.json files
# config:
#    { arg: -d,     type: str,      name: disable[], description: Omit plugin from initialization.  }
#    { arg: -e,     type: str,      name: enable[],  description: Add channel plugin.  }
#    { arg: --gtk3, type: boolean,  name: gtk3,      description: Start with Gtk3 interface. }
#    { arg: -D,     type: boolean,  name: debug,     description: Enable debug messages on console }
#    { arg: action, type: str *,    name: action[],  description: CLI interface commands. }
#    { arg: -x,     type: boolean,  name: exit,      hidden: 1 }
# version: 2.5
# priority: core
#
# In the main application or module files which need access
# to a global conf.* object, just import this module as follows:
#
#   from config import *
#
# Here conf is already an instantiation of the underlying
# ConfigDict class.
#
# Also provides the logging function __print__, and basic
# plugin handling code: plugin_meta() and module_list(),
# and the relative get_data() alias (files from pyzip/path).
#

from __future__ import print_function
import os
import sys
import json
import gzip
import platform
import re
from compat2and3 import gzip_decode, find_executable
import zlib
import zipfile
import inspect
import pkgutil
import argparse

# export symbols
__all__ = ["conf", "log", "__print__", "dbg", "plugin_meta", "module_list", "get_data", "find_executable"]


#-- create a stub instance of config object
conf = object()

# separate instance of netrc, if needed
netrc = None













|










|



















|







1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
#
# encoding: UTF-8
# api: streamtuner2
# type: class
# title: global config object
# description: reads ~/.config/streamtuner/*.json files
# config:
#    { arg: -d,     type: str,      name: disable[], description: Omit plugin from initialization.  }
#    { arg: -e,     type: str,      name: enable[],  description: Add channel plugin.  }
#    { arg: --gtk3, type: boolean,  name: gtk3,      description: Start with Gtk3 interface. }
#    { arg: -D,     type: boolean,  name: debug,     description: Enable debug messages on console }
#    { arg: action, type: str *,    name: action[],  description: CLI interface commands. }
#    { arg: -x,     type: boolean,  name: exit,      hidden: 1 }
# version: 2.6
# priority: core
#
# In the main application or module files which need access
# to a global conf.* object, just import this module as follows:
#
#   from config import *
#
# Here conf is already an instantiation of the underlying
# ConfigDict class.
#
# Also provides the logging function log.TYPE(...) and basic
# plugin handling code: plugin_meta() and module_list(),
# and the relative get_data() alias (files from pyzip/path).
#

from __future__ import print_function
import os
import sys
import json
import gzip
import platform
import re
from compat2and3 import gzip_decode, find_executable
import zlib
import zipfile
import inspect
import pkgutil
import argparse

# export symbols
__all__ = ["conf", "log", "plugin_meta", "module_list", "get_data", "find_executable"]


#-- create a stub instance of config object
conf = object()

# separate instance of netrc, if needed
netrc = None
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
       |  '  ([^']*)  '                        #  "quoted" or 'singl' values
       |     ([^,]*)                           #  or unquoted literals
     )
    """, re.X)




# wrapper for all print statements
def __print__(*args):
    if "debug" in conf and conf.debug or args[0] == dbg.ERR:
        print(" ".join([str(a) for a in args]), file=sys.stderr)


# error colorization
dbg = type('obj', (object,), {
    "ERR":  r"[ERR]",  # red    ERROR
    "INIT": r"[INIT]", # red    INIT ERROR
    "PROC": r"[PROC]", # green  PROCESS
    "CONF": r"[CONF]", # brown  CONFIG DATA
    "UI":   r"[UI]",   # blue   USER INTERFACE BEHAVIOUR
    "HTTP": r"[HTTP]", # magenta HTTP REQUEST
    "DATA": r"[DATA]", # cyan   DATA
    "INFO": r"[INFO]", # gray   INFO
    "STAT": r"[STATE]", # gray  CONFIG STATE
})


# Simplified print wrapper: `log.err(...)`
class log_printer(object):

    # Wrapper
    method = None
    def __getattr__(self, name):
        self.method = name
        return self.__print__
    
    # Printer
    def __print__(self, *args, **kwargs):
        # debug level
        method = self.method.upper()
        if not method == "ERR":
            if "debug" in conf and not conf.debug:
                return
        # color/prefix
        method = r"[{}[{}]".format(self.colors.get(method, "47m"), method)







<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<







|


|







462
463
464
465
466
467
468





















469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
       |  '  ([^']*)  '                        #  "quoted" or 'singl' values
       |     ([^,]*)                           #  or unquoted literals
     )
    """, re.X)
























# Simplified print wrapper: `log.err(...)`
class log_printer(object):

    # Wrapper
    method = None
    def __getattr__(self, name):
        self.method = name
        return self.log_print
    
    # Printer
    def log_print(self, *args, **kwargs):
        # debug level
        method = self.method.upper()
        if not method == "ERR":
            if "debug" in conf and not conf.debug:
                return
        # color/prefix
        method = r"[{}[{}]".format(self.colors.get(method, "47m"), method)
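
In practice the change boils down to a different calling convention: log.ERR(...) instead of __print__(dbg.ERR, ...). A stripped-down stand-in for the wrapper, just to show the __getattr__ dispatch; the real class shown above additionally gates non-error levels on conf.debug and prefixes a color code.

    from __future__ import print_function
    import sys

    class LogStub(object):
        """Bare-bones stand-in for config.log_printer: no colors, no conf.debug gate."""
        def __getattr__(self, name):
            def _print(*args):
                print("[%s]" % name.upper(), " ".join(str(a) for a in args), file=sys.stderr)
            return _print

    log = LogStub()
    log.ERR("Category not in known map.", "Jazz")   # stderr: [ERR] Category not in known map. Jazz
    log.UI("dblclick")                              # stderr: [UI] dblclick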

Modified contrib/8tracks.py from [f8775355d0] to [8dc7ba54d9].

14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# Requires a pingback on playing, which is near impossible to implement
# without also controlling the player. Automatic/implied notifications
# could work, or checking via dbus/mpris even.
#

import re
import json
from config import conf, dbg, __print__
from channels import *
import ahttp as http


# Surfmusik sharing site
class _8tracks (ChannelPlugin):








|







14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# Requires a pingback on playing, which is near impossible to implement
# without also controlling the player. Automatic/implied notifications
# could work, or checking via dbus/mpris even.
#

import re
import json
from config import *
from channels import *
import ahttp as http


# Surfmusik sharing site
class _8tracks (ChannelPlugin):

Modified help/action_saving.page from [fb016701c8] to [8a7c8020d6].

15
16
17
18
19
20
21
22
23
24
25
26
27
28
	<key>F2</key>.  A file dialog opens, where you can adapt the title.
	</p>

	<p>The extension of the filename decides on the saved link format.  By
	default a .m3u file is created, because that's what most audio players
	understand.</p>

	<note><p> But you can also save in <file>.pls</file> or <file>.xspf</file>
	or <file>.asx</file> or <file>.smil</file> format.  Note that the
	lower right dropdown has no effect.  You have to edit the extension
	into the filename field.</p></note>


</page>







|
|
|
|



15
16
17
18
19
20
21
22
23
24
25
26
27
28
	<key>F2</key>.  A file dialog opens, where you can adapt the title.
	</p>

	<p>The extension of the filename decides on the saved link format.  By
	default a .m3u file is created, because that's what most audio players
	understand.</p>

	<note><p> You can also save in <file>.pls</file> or <file>.xspf</file>
	or <file>.asx</file> or <file>.smil</file> format.  In current
	releases the file extension is automatically adapted when changing
	the filter dropdown (bottom right corner in the file dialog). </p></note>


</page>

Modified help/search.page from [201f70e153] to [48c7a270bc].

29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
	to scan for.</p>

	<media src="img/search.png" type="image" mime="image/png" />

        <p>It will either search in all channels, or just the last active
        channel/service.</p>

	<p>Then there are two search methods.  You mostly want to use
        the live <gui>Server search</gui>.  It passes your search terms to
        the actual directory services, and loads the most recent data into a
        result list. This might take a few seconds. And it's not implemented
        for all channel plugins however.</p>

	<p>With <gui>Cache find</gui> would just look up entries in your
        already downloaded channel/genre lists.  This is sufficient when
        you're just looking for something previously seen/listended to.</p>

	</section>

</page>







|



|

|

|




29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
	to scan for.</p>

	<media src="img/search.png" type="image" mime="image/png" />

        <p>It will either search in all channels, or just the last active
        channel/service.</p>

	<p>Then there are two search variants.  You mostly want to use
        the live <gui>Server search</gui>.  It passes your search terms to
        the actual directory services, and loads the most recent data into a
        result list. This might take a few seconds. And it's not implemented
        for all channel plugins. (Some websites/APIs don't have a search.)</p>

	<p><gui>Cache find</gui>, by contrast, just looks up entries in your
        already downloaded channel/genre lists.  This is sufficient when
        you're just looking for something previously seen/listened to.</p>

	</section>

</page>

Modified st2.py from [5611c8c9c5] to [0608c8f1cb].

119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
        # and late plugin initializations
        [callback(self) for callback in self.hooks["init"]]

        # display current open channel/notebook tab
        gui_startup(18/20.0)
        self.current_channel = self.current_channel_gtk()
        try: self.channel().first_show()
        except: __print__(dbg.INIT, "main.__init__: current_channel.first_show() initialization error")

  
        # bind gtk/glade event names to functions
        gui_startup(19.75/20.0)
        self.connect_signals({
            "gtk_main_quit" : self.gtk_main_quit,                # close window
            # treeviews / notebook







|







119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
        # and late plugin initializations
        [callback(self) for callback in self.hooks["init"]]

        # display current open channel/notebook tab
        gui_startup(18/20.0)
        self.current_channel = self.current_channel_gtk()
        try: self.channel().first_show()
        except: log.INIT("main.__init__: current_channel.first_show() initialization error")

  
        # bind gtk/glade event names to functions
        gui_startup(19.75/20.0)
        self.connect_signals({
            "gtk_main_quit" : self.gtk_main_quit,                # close window
            # treeviews / notebook
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
    def current_channel_gtk(self):
        return self.channel_names[self.notebook_channels.get_current_page()]
    
        
    # Notebook tab has been clicked (receives numeric page_num), but *NOT* yet changed (visually).
    def channel_switch(self, notebook, page, page_num=0, *args):
        self.current_channel = notebook.get_menu_label_text(notebook.get_nth_page(page_num))
        __print__(dbg.UI, "main.channel_switch() :=", self.current_channel)
        self.update_title()
        # if first selected, load current category
        # (run in thread, to make it look speedy on first startup)
        self.thread( self.channel().first_show )

    # Invoked from the menu instead, uses module name instead of numeric tab id
    def channel_switch_by_name(self, name):







|







228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
    def current_channel_gtk(self):
        return self.channel_names[self.notebook_channels.get_current_page()]
    
        
    # Notebook tab has been clicked (receives numeric page_num), but *NOT* yet changed (visually).
    def channel_switch(self, notebook, page, page_num=0, *args):
        self.current_channel = notebook.get_menu_label_text(notebook.get_nth_page(page_num))
        log.UI("main.channel_switch() :=", self.current_channel)
        self.update_title()
        # if first selected, load current category
        # (run in thread, to make it look speedy on first startup)
        self.thread( self.channel().first_show )

    # Invoked from the menu instead, uses module name instead of numeric tab id
    def channel_switch_by_name(self, name):
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
        url = self.selected("homepage")
        if url and len(url): action.browser(url)
        else: self.status("No homepage URL present.")

    # Browse to channel homepage (double click on notebook tab)
    def on_homepage_channel_clicked(self, widget, event=2):
        if event == 2 or event.type == gtk.gdk._2BUTTON_PRESS:
            __print__(dbg.UI, "dblclick")
            url = self.channel().meta.get("url", "https://duckduckgo.com/?q=" + self.channel().module)
            action.browser(url)

    # Reload stream list in current channel-category
    def on_reload_clicked(self, widget=None, reload=1):
        __print__(dbg.UI, "on_reload_clicked()", "reload=", reload, "current_channel=", self.current_channel, "c=", self.channels[self.current_channel], "cat=", self.channel().current)
        category = self.channel().current
        self.thread(
                       #@TODO: should get a wrapper, for HTTP errors, and optionalize bookmarks
            lambda: (  self.channel().load(category,reload), reload and self.bookmarks.heuristic_update(self.current_channel,category)  )
        )

    # Thread a function, add to worker pool (for utilizing stop button)
    def thread(self, target, *args):
        thread = Thread(target=target, args=args)
        thread.start()
        self.working.append(thread)


    # Click in category list
    def on_category_clicked(self, widget, event, *more):
        category = self.channel().currentcat()
        __print__(dbg.UI, "on_category_clicked", category, self.current_channel)
        self.on_reload_clicked(None, reload=0)
        pass

    # Add current selection to bookmark store
    def bookmark(self, widget):
        self.bookmarks.add(self.row())
        self.channel().row_icon(gtk.STOCK_ABOUT)







|





|
















|







275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
        url = self.selected("homepage")
        if url and len(url): action.browser(url)
        else: self.status("No homepage URL present.")

    # Browse to channel homepage (double click on notebook tab)
    def on_homepage_channel_clicked(self, widget, event=2):
        if event == 2 or event.type == gtk.gdk._2BUTTON_PRESS:
            log.UI("dblclick")
            url = self.channel().meta.get("url", "https://duckduckgo.com/?q=" + self.channel().module)
            action.browser(url)

    # Reload stream list in current channel-category
    def on_reload_clicked(self, widget=None, reload=1):
        log.UI("on_reload_clicked()", "reload=", reload, "current_channel=", self.current_channel, "c=", self.channels[self.current_channel], "cat=", self.channel().current)
        category = self.channel().current
        self.thread(
                       #@TODO: should get a wrapper, for HTTP errors, and optionalize bookmarks
            lambda: (  self.channel().load(category,reload), reload and self.bookmarks.heuristic_update(self.current_channel,category)  )
        )

    # Thread a function, add to worker pool (for utilizing stop button)
    def thread(self, target, *args):
        thread = Thread(target=target, args=args)
        thread.start()
        self.working.append(thread)


    # Click in category list
    def on_category_clicked(self, widget, event, *more):
        category = self.channel().currentcat()
        log.UI("on_category_clicked", category, self.current_channel)
        self.on_reload_clicked(None, reload=0)
        pass

    # Add current selection to bookmark store
    def bookmark(self, widget):
        self.bookmarks.add(self.row())
        self.channel().row_icon(gtk.STOCK_ABOUT)
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422

            # load defaults on first startup
            if not name in conf.plugins:
                conf.add_plugin_defaults(plugin_meta(module=name), name)
            
            # skip module if disabled
            if conf.plugins.get(name, 1) == False:
                __print__(dbg.STAT, "disabled plugin:", name)
                continue
            # or if it's a built-in (already imported)
            elif name in self.features or name in self.channels:
                continue
            
            # load plugin
            try:
                plugin = __import__("channels."+name, globals(), None, [""])
                #print [name for name,c in inspect.getmembers(plugin) if inspect.isclass(c)]
                plugin_class = plugin.__dict__[name]
                plugin_obj = plugin_class(parent=self)

                # add to .channels{}
                if issubclass(plugin_class, channels.GenericChannel):
                    self.channels[name] = plugin_obj
                # or .features{} for other plugin types
                else:
                    self.features[name] = plugin_obj
                
            except Exception as e:
                __print__(dbg.INIT, "load_plugin_channels: error initializing:", name, ", exception:")
                traceback.print_exc()

    # load application state (widget sizes, selections, etc.)
    def init_app_state(self):

        winlayout = conf.load("window")
        if (winlayout):







|




















|







387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422

            # load defaults on first startup
            if not name in conf.plugins:
                conf.add_plugin_defaults(plugin_meta(module=name), name)
            
            # skip module if disabled
            if conf.plugins.get(name, 1) == False:
                log.STAT("disabled plugin:", name)
                continue
            # or if it's a built-in (already imported)
            elif name in self.features or name in self.channels:
                continue
            
            # load plugin
            try:
                plugin = __import__("channels."+name, globals(), None, [""])
                #print [name for name,c in inspect.getmembers(plugin) if inspect.isclass(c)]
                plugin_class = plugin.__dict__[name]
                plugin_obj = plugin_class(parent=self)

                # add to .channels{}
                if issubclass(plugin_class, channels.GenericChannel):
                    self.channels[name] = plugin_obj
                # or .features{} for other plugin types
                else:
                    self.features[name] = plugin_obj
                
            except Exception as e:
                log.INIT("load_plugin_channels: error initializing:", name, ", exception:")
                traceback.print_exc()

    # load application state (widget sizes, selections, etc.)
    def init_app_state(self):

        winlayout = conf.load("window")
        if (winlayout):
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
        if (conf.get("firstrun")):
            main.configwin.open(None)
            del conf.firstrun

        # run
        gtk.main()
        [callback() for callback in main.hooks["quit"]]
        __print__(dbg.PROC, r" gtk_main_quit ")
        
    # invoke command-line interface
    else:
        import cli
        cli.StreamTunerCLI(conf.args.action)

# run
if __name__ == "__main__":
    main()








|










494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
        if (conf.get("firstrun")):
            main.configwin.open(None)
            del conf.firstrun

        # run
        gtk.main()
        [callback() for callback in main.hooks["quit"]]
        log.PROC(r" gtk_main_quit ")
        
    # invoke command-line interface
    else:
        import cli
        cli.StreamTunerCLI(conf.args.action)

# run
if __name__ == "__main__":
    main()
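
The thread() helper shown further up in this file is what keeps category reloads from blocking the GUI: each call spawns a Thread and remembers it in a worker list so the stop button can act on running workers. A standalone sketch of that pattern; the function names below are placeholders, only the Thread/append structure mirrors the code above.

    from threading import Thread
    import time

    working = []                        # st2.py keeps this list on the main window ("worker pool")

    def reload_category(name):
        time.sleep(0.1)                 # stands in for channel().load(category, reload)
        print("loaded", name)

    def spawn(target, *args):
        t = Thread(target=target, args=args)
        t.start()
        working.append(t)               # lets the stop button find and wait on running workers

    spawn(reload_category, "Jazz")
    for t in working:
        t.join()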

Modified uikit.py from [59736bfdc8] to [14781849fd].

42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
if ver==3:
    from gi import pygtkcompat as pygtk
    pygtk.enable() 
    pygtk.enable_gtk(version='3.0')
    from gi.repository import Gtk as gtk
    from gi.repository import GObject as gobject
    from gi.repository import GdkPixbuf
    __print__(dbg.STAT, gtk)
    __print__(dbg.STAT, gobject)
else:
    import pygtk
    import gtk
    import gobject
    GdkPixbuf = gtk.gdk

# prepare gtkbuilder data







|
|







42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
if ver==3:
    from gi import pygtkcompat as pygtk
    pygtk.enable() 
    pygtk.enable_gtk(version='3.0')
    from gi.repository import Gtk as gtk
    from gi.repository import GObject as gobject
    from gi.repository import GdkPixbuf
    log.STAT(gtk)
    log.STAT(gobject)
else:
    import pygtk
    import gtk
    import gobject
    GdkPixbuf = gtk.gdk

# prepare gtkbuilder data
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
                    # attach cell to column
                    col.pack_end(rend, expand=cell[3].get("expand",True))
                    # apply attributes
                    for attr,val in list(cell[3].items()):
                        col.add_attribute(rend, attr, val)
                    # next
                    datapos += 1
                    #__print__(dbg.INFO, cell, len(cell))

                # add column to treeview
                widget.append_column(col)
            # finalize widget
            widget.set_search_column(5)   #??
            widget.set_search_column(4)   #??
            widget.set_search_column(3)   #??







|







127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
                    # attach cell to column
                    col.pack_end(rend, expand=cell[3].get("expand",True))
                    # apply attributes
                    for attr,val in list(cell[3].items()):
                        col.add_attribute(rend, attr, val)
                    # next
                    datapos += 1
                    #log.INFO(cell, len(cell))

                # add column to treeview
                widget.append_column(col)
            # finalize widget
            widget.set_search_column(5)   #??
            widget.set_search_column(4)   #??
            widget.set_search_column(3)   #??
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
            if (not rowmap):
                for desc in datamap:
                    for var in xrange(2, len(desc)):
                        vartypes.append(desc[var][1])  # content types
                        rowmap.append(desc[var][0])    # dict{} column keys in entries[] list
            # create gtk array storage
            ls = gtk.ListStore(*vartypes)   # could be a TreeStore, too
            #__print__(dbg.UI, vartypes, len(vartypes))
            #__print__(dbg.DATA, rowmap, len(rowmap))
 
            # prepare for missing values, and special variable types
            defaults = {
                str: "",
                unicode: "",
                bool: False,
                int: 0,







|
|







151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
            if (not rowmap):
                for desc in datamap:
                    for var in xrange(2, len(desc)):
                        vartypes.append(desc[var][1])  # content types
                        rowmap.append(desc[var][0])    # dict{} column keys in entries[] list
            # create gtk array storage
            ls = gtk.ListStore(*vartypes)   # could be a TreeStore, too
            #log.UI(vartypes, len(vartypes))
            #log.DATA(rowmap, len(rowmap))
 
            # prepare for missing values, and special variable types
            defaults = {
                str: "",
                unicode: "",
                bool: False,
                int: 0,
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
                    ls.append(row)   # had to be adapted for real TreeStore (would require additional input for grouping/level/parents)

                except:
                    # brute-force typecast
                    ls.append( [va  if ty==gtk.gdk.Pixbuf  else ty(va)   for va,ty in zip(row,vartypes)]  )

            #if entries:
                 #__print__("→", row, len(row))
            
            # apply array to widget
            widget.set_model(ls)
            return ls
            
        pass




    #-- treeview for categories
    #
    # simple two-level treeview display in one column
    # with entries = [main,[sub,sub], title,[...],...]
    #
    @staticmethod     
    def tree(widget, entries, title="category", icon=gtk.STOCK_DIRECTORY):

        # list types
        ls = gtk.TreeStore(str, str)
        #__print__(dbg.DATA, ".tree", entries)

        # add entries
        for entry in entries:
            if isinstance(entry, (str,unicode)):
                main = ls.append(None, [str(entry), icon])
            else:
                for sub_title in entry:







|




















|







192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
                    ls.append(row)   # had to be adapted for real TreeStore (would require additional input for grouping/level/parents)

                except:
                    # brute-force typecast
                    ls.append( [va  if ty==gtk.gdk.Pixbuf  else ty(va)   for va,ty in zip(row,vartypes)]  )

            #if entries:
            #     log.ROWS(row, len(row))
            
            # apply array to widget
            widget.set_model(ls)
            return ls
            
        pass




    #-- treeview for categories
    #
    # simple two-level treeview display in one column
    # with entries = [main,[sub,sub], title,[...],...]
    #
    @staticmethod     
    def tree(widget, entries, title="category", icon=gtk.STOCK_DIRECTORY):

        # list types
        ls = gtk.TreeStore(str, str)
        #log.DATA(".tree", entries)

        # add entries
        for entry in entries:
            if isinstance(entry, (str,unicode)):
                main = ls.append(None, [str(entry), icon])
            else:
                for sub_title in entry: