Check-in [0cf2739b13]
Overview
| Comment: | Live365 fixed by Abhisek Sanyal | 
|---|---|
| Downloads: | Tarball | ZIP archive | SQL archive | 
| Timelines: | family | ancestors | descendants | both | trunk | 
| Files: | files | file ages | folders | 
| SHA1: | 0cf2739b1393e614b40e4d3f431af0e3 |
| User & Date: | mario on 2014-01-04 22:37:55 | 
| Other Links: | manifest | tags | 
Context
| 
   2014-01-05 
 | ||
| 01:23 | fix Pixbuf has_alpha bug due to redundant typecast prior liststore appending check-in: caee086b9c user: mario tags: trunk, 2.0.9 | |
| 
   2014-01-04 
 | ||
| 22:37 | Live365 fixed by Abhisek Sanyal check-in: 0cf2739b13 user: mario tags: trunk | |
| 22:25 | live365 now searches javascript check-in: f0cd5d48f2 user: mario tags: trunk | |
Changes
Modified channels/live365.py from [f126b8da6a] to [9e6de8cad1].
  | 
  | < | | < < < > <  | 1 2 3 4 5 6 7 8 9 10 11 12  | # api: st2 # title: live365 channel # # 2.0.9 fixed by Abhisek Sanyal # # streamtuner2 modules from config import conf from mygtk import mygtk  | 
| ︙ | ︙ | |||
62 63 64 65 66 67 68  | 
            
            # superclass
            ChannelPlugin.__init__(self, parent)
        # read category thread from /listen/browse.live
        def update_categories(self):
 | > | > > > > > > > > > > > > > > > > > > > > > > > > > | | | | > | > > | < > | | < < | < < < < | < < < | > > > | | | < | | < < > > | > | > > | < |  | 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152  | 
            
            # superclass
            ChannelPlugin.__init__(self, parent)
        # read category thread from /listen/browse.live
        def update_categories(self):
            """Rebuild self.categories by scraping live365's genre index page.

            Top-level genre names are appended as plain strings, each
            immediately followed by a list of its sub-genre names.
            """
            self.categories = []

            # pull the genre overview page (the status bar gets progress feedback)
            html = http.get("http://www.live365.com/index.live", feedback=self.parent.status)
            genre_rx = re.compile("""
                href=['"]/genres/([\w\d%+]+)['"][^>]*>
                (   (?:<nobr>)?   )
                ( \w[-\w\ /'.&]+ )
                (   (?:</a>)?   )
            """, re.X|re.S)

            # Sub-genres are collected until the next top-level entry shows up;
            # a match counts as top-level when the closing </a> group is present
            # and no <nobr> prefix was captured.
            pending = []
            for (link, nobr, label, closing) in genre_rx.findall(html):
                if closing and not nobr:
                    # main genre: emit its name, then the sub-genres gathered so far
                    self.categories.append(label)
                    self.categories.append(pending)
                    pending = []
                else:
                    pending.append(label)
            # flush the trailing sub-genre group
            self.categories.append(pending)
        # extract stream infos
        def update_streams(self, cat, search=""):
            """Scrape the live365 directory for category `cat` (or for a free-text
            search term) and return a list of station row dicts.
            """

            # Build the directory/search URL. live365 caps results at 200 rows,
            # so fetching a single page is always sufficient.
            if (not search):
                url = "http://www.live365.com/cgi-bin/directory.cgi?first=1&rows=200&mode=2&genre=" + self.cat2tag(cat)
            else:
                url = "http://www.live365.com/cgi-bin/directory.cgi?site=..&searchdesc=" + urllib.quote(search) + "&searchgenre=" + self.cat2tag(cat) + "&x=0&y=0"
            html = http.get(url, feedback=self.parent.status)

            # One verbose regex over the terse station listing; groups in order:
            # access flag, station page URL, title, genre markup, station id,
            # description, bitrate, listener count, rating points, rating votes.
            rx = re.compile(r"""
            ['"](OK|PM_ONLY|SUBSCRIPTION).*?
            href=['"](http://www.live365.com/stations/\w+)['"].*?
            page['"]>([^<>]*)</a>.*?
            CLASS=['"]genre-link['"][^>]*>(.+?)</a>.+?
            &station_id=(\d+).+?
            class=["']desc-link['"][^>]+>([^<>]*)<.*?
            =["']audioQuality.+?>(\d+)\w<.+?
            >DrawListenerStars\((\d+),.+?
            >DrawRatingStars\((\d+),\s+(\d+),.*?
                """, re.X|re.I|re.S|re.M)
            # (a  src="(http://www.live365.com/.+?/stationlogo\w+.jpg)"  capture group
            #  used to sit in the pattern above, currently disabled)

            __print__( html )
            ls = []
            for row in rx.findall(html):
                __print__( row )
                (sofo, homepage, title, genre_html, station_id, desc, bitrate, listeners, points_str, votes_str) = row
                points = int(points_str)
                count = int(votes_str)
                ls.append({
                    "launch_id": sofo,
                    "sofo": sofo,      # raw access-status flag (OK / PM_ONLY / SUBSCRIPTION)
                    "state":  (""  if  sofo == "OK"  else  gtk.STOCK_STOP),
                    "homepage": entity_decode(homepage),
                    "title": entity_decode(title),
                    "genre": self.strip_tags(genre_html),
                    "bitrate": int(bitrate),
                    "listeners": int(listeners),
                    "max": 0,
                    # Weighted score: many votes rank slightly above a lone vote,
                    # and the denominator can never reach exactly zero.
                    "rating": (points + count**0.4) / (count - 0.001*(count-0.1)),
                    "rating_points": points,
                    "rating_count": count,
                    # the numeric station id doubles as the play-URL suffix
                    "station_id": station_id,
                    "url": self.base_url + "play/" + station_id,
                    "description": entity_decode(desc),
                })
            return ls
            
        # faster if we do it in _update() prematurely
        #def prepare(self, ls):
        #    GenericChannel.prepare(ls)
        #    for row in ls:
 | 
| ︙ | ︙ |