X-Git-Url: https://git.sommitrealweird.co.uk/rss2maildir.git/blobdiff_plain/71fa50baa5fc33670e69b92047f1e7cd2f189c75..6eb051aa19ec7dfb7a67c858e06c4d0078e16d22:/rss2maildir.py?ds=sidebyside diff --git a/rss2maildir.py b/rss2maildir.py index 8f76893..e8883b1 100755 --- a/rss2maildir.py +++ b/rss2maildir.py @@ -48,42 +48,46 @@ from HTMLParser import HTMLParser class HTML2Text(HTMLParser): entities = { - "amp": "&", - "lt": "<", - "gt": ">", - "pound": "£", - "copy": "©", - "apos": "'", - "quot": "\"", - "nbsp": " ", + u'amp': "&", + u'lt': "<", + u'gt': ">", + u'pound': "£", + u'copy': "©", + u'apos': "'", + u'quot': "\"", + u'nbsp': " ", } blockleveltags = [ - "h1", - "h2", - "h3", - "h4", - "h5", - "h6", - "pre", - "p", - "ul", - "ol", - "dl", - "br", + u'h1', + u'h2', + u'h3', + u'h4', + u'h5', + u'h6', + u'pre', + u'p', + u'ul', + u'ol', + u'dl', + u'li', + u'dt', + u'dd', + u'div', + #u'blockquote', ] liststarttags = [ - "ul", - "ol", - "dl", + u'ul', + u'ol', + u'dl', ] cancontainflow = [ - "div", - "li", - "dd", - "blockquote", + u'div', + u'li', + u'dd', + u'blockquote', ] def __init__(self,textwidth=70): @@ -92,7 +96,10 @@ class HTML2Text(HTMLParser): self.textwidth = textwidth self.opentags = [] self.indentlevel = 0 + self.ignorenodata = False self.listcount = [] + self.urls = [] + self.images = {} HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): @@ -101,11 +108,6 @@ class HTML2Text(HTMLParser): # handle starting a new block - unless we're in a block element # that can contain other blocks, we'll assume that we want to close # the container - if tag_name == u'br': - self.handle_curdata() - self.opentags.append(tag_name) - self.opentags.pop() - if len(self.opentags) > 1 and self.opentags[-1] == u'li': self.handle_curdata() @@ -131,6 +133,8 @@ class HTML2Text(HTMLParser): self.opentags.pop() self.opentags.append(tag_name) else: + if tag_name == "span": + return listcount = 0 try: listcount = self.listcount[-1] @@ -145,33 +149,101 @@ class HTML2Text(HTMLParser): and self.opentags[-1] == u'dd': self.handle_curdata() self.opentags.pop() - - self.handle_curdata() - self.opentags.append(tag_name) + elif tag_name == u'a': + for attr in attrs: + if attr[0].lower() == u'href': + self.urls.append(attr[1].decode('utf-8')) + self.curdata = self.curdata + u'`' + self.opentags.append(tag_name) + return + elif tag_name == u'img': + self.handle_image(attrs) + return + elif tag_name == u'br': + self.handle_br() + return + else: + # we don't know the tag, so lets avoid handling it! 
+ return def handle_startendtag(self, tag, attrs): if tag.lower() == u'br': + self.handle_br() + elif tag.lower() == u'img': + self.handle_image(attrs) + return + + def handle_br(self): + self.handle_curdata() self.opentags.append(u'br') - self.handle_curdata() # just handle the data, don't do anything else + self.handle_curdata() self.opentags.pop() + def handle_image(self, attrs): + alt = u'' + url = u'' + for attr in attrs: + if attr[0] == 'alt': + alt = attr[1].decode('utf-8') + elif attr[0] == 'src': + url = attr[1].decode('utf-8') + if url: + if alt: + if self.images.has_key(alt): + if self.images[alt]["url"] == url: + self.curdata = self.curdata \ + + u'|%s|' %(alt,) + else: + while self.images.has_key(alt): + alt = alt + "_" + self.images[alt]["url"] = url + self.curdata = self.curdata \ + + u'|%s|' %(alt,) + else: + self.images[alt] = {} + self.images[alt]["url"] = url + self.curdata = self.curdata \ + + u'|%s|' %(alt,) + else: + if self.images.has_key(url): + self.curdata = self.curdata \ + + u'|%s|' %(url,) + else: + self.images[url] = {} + self.images[url]["url"] =url + self.curdata = self.curdata \ + + u'|%s|' %(url,) + def handle_curdata(self): + if len(self.opentags) == 0: return + tag_thats_done = self.opentags[-1] + if len(self.curdata) == 0: return - if len(self.curdata.strip()) == 0: + if tag_thats_done == u'br': + if len(self.text) == 0 or self.text[-1] != '\n': + self.text = self.text + '\n' + self.ignorenodata = True return - tag_thats_done = self.opentags[-1] + if len(self.curdata.strip()) == 0: + return if tag_thats_done in self.blockleveltags: newlinerequired = self.text != u'' + if self.ignorenodata: + newlinerequired = False + self.ignorenodata = False if newlinerequired: - if newlinerequired \ - and len(self.text) > 2 \ + if tag_thats_done in [u'dt', u'dd', u'li'] \ + and len(self.text) > 1 \ + and self.text[-1] != u'\n': + self.text = self.text + u'\n' + elif len(self.text) > 2 \ and self.text[-1] != u'\n' \ and self.text[-2] != u'\n': self.text = self.text + u'\n\n' @@ -179,8 +251,7 @@ class HTML2Text(HTMLParser): if tag_thats_done in ["h1", "h2", "h3", "h4", "h5", "h6"]: underline = u'' underlinechar = u'=' - headingtext = unicode( \ - self.curdata.encode("utf-8").strip(), "utf-8") + headingtext = " ".join(self.curdata.split()) seperator = u'\n' + u' '*self.indentlevel headingtext = seperator.join( \ textwrap.wrap( \ @@ -201,11 +272,12 @@ class HTML2Text(HTMLParser): underline = u' ' * self.indentlevel \ + underlinechar * len(headingtext) self.text = self.text \ - + headingtext.encode("utf-8") + u'\n' \ + + headingtext + u'\n' \ + underline - elif tag_thats_done == u'p': + elif tag_thats_done in [u'p', u'div']: paragraph = unicode( \ - self.curdata.strip().encode("utf-8"), "utf-8") + " ".join(self.curdata.strip().encode("utf-8").split()), \ + "utf-8") seperator = u'\n' + u' ' * self.indentlevel self.text = self.text \ + u' ' * self.indentlevel \ @@ -215,10 +287,13 @@ class HTML2Text(HTMLParser): elif tag_thats_done == "pre": self.text = self.text + unicode( \ self.curdata.encode("utf-8"), "utf-8") - elif tag_thats_done == "blockquote": + elif tag_thats_done == u'blockquote': quote = unicode( \ - self.curdata.encode("utf-8").strip(), "utf-8") + " ".join(self.curdata.encode("utf-8").strip().split()), \ + "utf-8") seperator = u'\n' + u' ' * self.indentlevel + u'> ' + if len(self.text) > 0 and self.text[-1] != u'\n': + self.text = self.text + u'\n' self.text = self.text \ + u'> ' \ + seperator.join( \ @@ -227,6 +302,7 @@ class HTML2Text(HTMLParser): 
self.textwidth - self.indentlevel - 2 \ ) ) + self.curdata = u'' elif tag_thats_done == "li": item = unicode(self.curdata.encode("utf-8").strip(), "utf-8") if len(self.text) > 0 and self.text[-1] != u'\n': @@ -266,7 +342,9 @@ class HTML2Text(HTMLParser): ) self.curdata = u'' elif tag_thats_done == u'dt': - definition = unicode(self.curdata.encode("utf-8").strip(), "utf-8") + definition = unicode(" ".join( \ + self.curdata.encode("utf-8").strip().split()), \ + "utf-8") if len(self.text) > 0 and self.text[-1] != u'\n': self.text = self.text + u'\n\n' elif len(self.text) > 1 and self.text[-2] != u'\n': @@ -279,7 +357,9 @@ class HTML2Text(HTMLParser): self.textwidth - self.indentlevel - 1)) self.curdata = u'' elif tag_thats_done == u'dd': - definition = unicode(self.curdata.encode("utf-8").strip(), "utf-8") + definition = unicode(" ".join( \ + self.curdata.encode("utf-8").strip().split()), + "utf-8") if len(definition) > 0: if len(self.text) > 0 and self.text[-1] != u'\n': self.text = self.text + u'\n' @@ -293,34 +373,31 @@ class HTML2Text(HTMLParser): ) \ ) self.curdata = u'' + elif tag_thats_done == u'a': + self.curdata = self.curdata + u'`__' + pass elif tag_thats_done in self.liststarttags: pass - else: - # we've got no idea what this tag does, so we'll - # make an assumption that we're not going to know later - if len(self.curdata) > 0: - self.text = self.text \ - + u' ... ' \ - + u'\n ... '.join( \ - textwrap.wrap( \ - unicode( \ - self.curdata.encode("utf-8").strip(), \ - "utf-8"), self.textwidth - 5)) - self.curdata = u'' if tag_thats_done in self.blockleveltags: self.curdata = u'' + self.ignorenodata = False + def handle_endtag(self, tag): + self.ignorenodata = False + if tag == "span": + return + try: tagindex = self.opentags.index(tag) except: - # closing tag we know nothing about. - # err. weird. - tagindex = 0 - + return tag = tag.lower() + if tag in [u'br', u'img']: + return + if tag in self.liststarttags: if tag in [u'ol', u'dl', u'ul']: self.handle_curdata() @@ -356,7 +433,9 @@ class HTML2Text(HTMLParser): self.opentags.pop() def handle_data(self, data): - self.curdata = self.curdata + unicode(data, "utf-8") + if len(self.opentags) == 0: + self.opentags.append(u'p') + self.curdata = self.curdata + data.decode("utf-8") def handle_entityref(self, name): entity = name @@ -378,6 +457,15 @@ class HTML2Text(HTMLParser): while len(self.text) > 1 and self.text[-1] == u'\n': self.text = self.text[:-1] self.text = self.text + u'\n' + if len(self.urls) > 0: + self.text = self.text + u'\n__ ' + u'\n__ '.join(self.urls) + u'\n' + self.urls = [] + if len(self.images.keys()) > 0: + self.text = self.text + u'\n.. ' \ + + u'.. '.join( \ + ["|%s| image:: %s" %(a, self.images[a]["url"]) \ + for a in self.images.keys()]) + u'\n' + self.images = {} return self.text def open_url(method, url): @@ -663,11 +751,13 @@ if __name__ == "__main__": elif scp.has_option("general", "state_dir"): new_state_dir = scp.get("general", "state_dir") try: - mode = os.stat(state_dir)[stat.ST_MODE] + mode = os.stat(new_state_dir)[stat.ST_MODE] if not stat.S_ISDIR(mode): sys.stderr.write( \ "State directory (%s) is not a directory\n" %(state_dir)) sys.exit(1) + else: + state_dir = new_state_dir except: # try to create it try:
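
A minimal sketch (not taken from rss2maildir itself) of the technique this diff introduces in HTML2Text: anchors become reStructuredText anonymous references (`text`__) whose targets are appended as "__ <url>" lines when the text is finalised, and images become substitution references (|alt|) with matching ".. |alt| image:: <url>" directives at the end. The class and variable names below are illustrative only, and it uses Python 3's html.parser rather than the Python 2 HTMLParser module the script imports, so treat it as an assumption-laden outline of the approach, not the script's own code.

# Sketch only: mirrors the link/image handling the diff adds, under the
# assumptions stated above (Python 3, hypothetical class name RstLinkText).
from html.parser import HTMLParser

class RstLinkText(HTMLParser):
    def __init__(self):
        super().__init__()
        self.text = ''
        self.urls = []      # hrefs in document order
        self.images = {}    # alt text -> image url

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == 'a' and attrs.get('href'):
            self.urls.append(attrs['href'])
            self.text += '`'            # open the reST reference
        elif tag == 'img':
            alt = attrs.get('alt') or attrs.get('src', '')
            self.images[alt] = attrs.get('src', '')
            self.text += '|%s|' % alt   # substitution reference in the body

    def handle_endtag(self, tag):
        if tag == 'a' and self.urls:
            self.text += '`__'          # close as an anonymous reference

    def handle_data(self, data):
        self.text += data

    def gettext(self):
        out = self.text.rstrip() + '\n'
        if self.urls:
            # one "__ <url>" target per anonymous reference, in order of use
            out += '\n__ ' + '\n__ '.join(self.urls) + '\n'
        for alt, url in self.images.items():
            # one image directive per substitution reference
            out += '\n.. |%s| image:: %s\n' % (alt, url)
        return out

p = RstLinkText()
p.feed('<p>See <a href="http://example.org/">the site</a> '
       '<img src="http://example.org/logo.png" alt="logo"></p>')
print(p.gettext())
# See `the site`__ |logo|
#
# __ http://example.org/
#
# .. |logo| image:: http://example.org/logo.png

Because the references are anonymous, the order of the "__ <url>" target lines must match the order in which the links were opened, which is why the sketch (like the patch) appends hrefs to a list as each start tag is seen and only emits the targets once the whole text has been assembled.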