X-Git-Url: https://git.sommitrealweird.co.uk/rss2maildir.git/blobdiff_plain/95250ed0bbd4f1778709f8482aacaac6639af2d8..aa4dd6a06ba1dd47704ac7dd6bcaa062673162d1:/rss2maildir.py

diff --git a/rss2maildir.py b/rss2maildir.py
index 4421811..a9abe4d 100755
--- a/rss2maildir.py
+++ b/rss2maildir.py
@@ -39,51 +39,175 @@
 from optparse import OptionParser
 from ConfigParser import SafeConfigParser
 from base64 import b64encode
-import md5
+
+if sys.version_info[0] == 2 and sys.version_info[1] >= 6:
+    import hashlib as md5
+else:
+    import md5
 
 import cgi
 import dbm
 
+import re
+
 from HTMLParser import HTMLParser
 
 class HTML2Text(HTMLParser):
     entities = {
-        "amp": "&",
-        "lt": "<",
-        "gt": ">",
-        "pound": "£",
-        "copy": "©",
-        "apos": "'",
-        "quot": "\"",
-        "nbsp": " ",
+        u'amp': u'&',
+        u'lt': u'<',
+        u'gt': u'>',
+        u'pound': u'£',
+        u'copy': u'©',
+        u'apos': u'\'',
+        u'quot': u'"',
+        u'nbsp': u' ',
+        u'ldquo': u'“',
+        u'rdquo': u'”',
+        u'lsquo': u'‘',
+        u'rsquo': u'’',
+        u'laquo': u'«',
+        u'raquo': u'»',
+        u'lsaquo': u'‹',
+        u'rsaquo': u'›',
+        u'bull': u'•',
+        u'middot': u'·',
+        u'deg': u'°',
+        u'helip': u'…',
+        u'trade': u'™',
+        u'reg': u'®',
+        u'agrave': u'à',
+        u'Agrave': u'À',
+        u'egrave': u'è',
+        u'Egrave': u'È',
+        u'igrave': u'ì',
+        u'Igrave': u'Ì',
+        u'ograve': u'ò',
+        u'Ograve': u'Ò',
+        u'ugrave': u'ù',
+        u'Ugrave': u'Ù',
+        u'aacute': u'á',
+        u'Aacute': u'Á',
+        u'eacute': u'é',
+        u'Eacute': u'É',
+        u'iacute': u'í',
+        u'Iacute': u'Í',
+        u'oacute': u'ó',
+        u'Oacute': u'Ó',
+        u'uacute': u'ú',
+        u'Uacute': u'Ú',
+        u'yactue': u'ý',
+        u'Yacute': u'Ý',
+        u'acirc': u'â',
+        u'Acirc': u'Â',
+        u'ecirc': u'ê',
+        u'Ecirc': u'Ê',
+        u'icirc': u'î',
+        u'Icirc': u'Î',
+        u'ocirc': u'ô',
+        u'Ocirc': u'Ô',
+        u'ucirc': u'û',
+        u'Ucirc': u'Û',
+        u'atilde': u'ã',
+        u'Atilde': u'Ã',
+        u'ntilde': u'ñ',
+        u'Ntilde': u'Ñ',
+        u'otilde': u'õ',
+        u'Otilde': u'Õ',
+        u'auml': u'ä',
+        u'Auml': u'Ä',
+        u'euml': u'ë',
+        u'Euml': u'Ë',
+        u'iuml': u'ï',
+        u'Iuml': u'Ï',
+        u'ouml': u'ö',
+        u'Ouml': u'Ö',
+        u'uuml': u'ü',
+        u'Uuml': u'Ü',
+        u'yuml': u'ÿ',
+        u'Yuml': u'Ÿ',
+        u'iexcl': u'¡',
+        u'iquest': u'¿',
+        u'ccedil': u'ç',
+        u'Ccedil': u'Ç',
+        u'oelig': u'œ',
+        u'OElig': u'Œ',
+        u'szlig': u'ß',
+        u'oslash': u'ø',
+        u'Oslash': u'Ø',
+        u'aring': u'å',
+        u'Aring': u'Å',
+        u'aelig': u'æ',
+        u'AElig': u'Æ',
+        u'thorn': u'þ',
+        u'THORN': u'Þ',
+        u'eth': u'ð',
+        u'ETH': u'Ð',
+        u'mdash': u'—',
+        u'ndash': u'–',
+        u'sect': u'§',
+        u'para': u'¶',
+        u'uarr': u'↑',
+        u'darr': u'↓',
+        u'larr': u'←',
+        u'rarr': u'→',
+        u'dagger': u'†',
+        u'Dagger': u'‡',
+        u'permil': u'‰',
+        u'prod': u'∏',
+        u'infin': u'∞',
+        u'radic': u'√',
+        u'there4': u'∴',
+        u'int': u'∫',
+        u'asymp': u'≈',
+        u'ne': u'≠',
+        u'equiv': '≡',
+        u'le': u'≤',
+        u'ge': u'≥',
+        u'loz': u'⋄',
+        u'sum': u'∑',
+        u'part': u'∂',
+        u'prime': u'′',
+        u'Prime': u'″',
+        u'harr': u'↔',
+        u'micro': u'µ',
+        u'not': u'¬',
+        u'plusmn': u'±',
+        u'divide': u'÷',
+        u'cent': u'¢',
+        u'euro': u'€',
         }
 
     blockleveltags = [
-        "h1",
-        "h2",
-        "h3",
-        "h4",
-        "h5",
-        "h6",
-        "pre",
-        "p",
-        "ul",
-        "ol",
-        "dl",
-        "br",
+        u'h1',
+        u'h2',
+        u'h3',
+        u'h4',
+        u'h5',
+        u'h6',
+        u'pre',
+        u'p',
+        u'ul',
+        u'ol',
+        u'dl',
+        u'li',
+        u'dt',
+        u'dd',
+        u'div',
+        u'blockquote',
         ]
 
     liststarttags = [
-        "ul",
-        "ol",
-        "dl",
+        u'ul',
+        u'ol',
+        u'dl',
         ]
 
     cancontainflow = [
-        "div",
-        "li",
-        "dd",
-        "blockquote",
+        u'div',
+        u'li',
+        u'dd',
+        u'blockquote',
        ]
 
     def __init__(self,textwidth=70):
@@ -92,7 +216,10 @@ class HTML2Text(HTMLParser):
         self.textwidth = textwidth
         self.opentags = []
         self.indentlevel = 0
+        self.ignorenodata = False
         self.listcount = []
+        self.urls = []
+        self.images = {}
         HTMLParser.__init__(self)
 
     def handle_starttag(self, tag, attrs):
@@ -101,11 +228,6 @@ class HTML2Text(HTMLParser):
         # handle starting a new block - unless we're in a block element
         # that can contain other blocks, we'll assume that we want to close
         # the container
-        if tag_name == u'br':
-            self.handle_curdata()
-            self.opentags.append(tag_name)
-            self.opentags.pop()
-
 
         if len(self.opentags) > 1 and self.opentags[-1] == u'li':
             self.handle_curdata()
@@ -114,6 +236,9 @@ class HTML2Text(HTMLParser):
             self.listcount.append(1)
             self.listlevel = len(self.listcount) - 1
 
+        if tag_name == u'dl':
+            self.indentlevel = self.indentlevel + 4
+
         if tag_name in self.liststarttags:
             smallist = self.opentags[-3:-1]
             smallist.reverse()
@@ -131,6 +256,8 @@ class HTML2Text(HTMLParser):
                 self.opentags.pop()
                 self.opentags.append(tag_name)
         else:
+            if tag_name == "span":
+                return
             listcount = 0
             try:
                 listcount = self.listcount[-1]
@@ -145,33 +272,106 @@ class HTML2Text(HTMLParser):
                 and self.opentags[-1] == u'dd':
                 self.handle_curdata()
                 self.opentags.pop()
-
-        self.handle_curdata()
-        self.opentags.append(tag_name)
+        elif tag_name == u'a':
+            for attr in attrs:
+                if attr[0].lower() == u'href':
+                    self.urls.append(attr[1].decode('utf-8'))
+            self.curdata = self.curdata + u'`'
+            self.opentags.append(tag_name)
+            return
+        elif tag_name == u'img':
+            self.handle_image(attrs)
+            return
+        elif tag_name == u'br':
+            self.handle_br()
+            return
+        else:
+            # we don't know the tag, so lets avoid handling it!
+            return
 
     def handle_startendtag(self, tag, attrs):
         if tag.lower() == u'br':
+            self.handle_br()
+        elif tag.lower() == u'img':
+            self.handle_image(attrs)
+        return
+
+    def handle_br(self):
+        self.handle_curdata()
         self.opentags.append(u'br')
-        self.handle_curdata() # just handle the data, don't do anything else
+        self.handle_curdata()
         self.opentags.pop()
 
+    def handle_image(self, attrs):
+        alt = u''
+        url = u''
+        for attr in attrs:
+            if attr[0] == 'alt':
+                if isinstance(attr[1], str):
+                    alt = u'%s' %(attr[1].decode("utf-8"))
+                else:
+                    alt = attr[1]
+            elif attr[0] == 'src':
+                if isinstance(attr[1], str):
+                    url = u'%s' %(attr[1].decode("utf-8"))
+                else:
+                    url = attr[1]
+        if url:
+            if alt:
+                if self.images.has_key(alt):
+                    if self.images[alt]["url"] == url:
+                        self.curdata = self.curdata \
+                            + u'|%s|' %(alt,)
+                    else:
+                        while self.images.has_key(alt):
+                            alt = alt + "_"
+                        self.images[alt] = {"url": url}
+                        self.curdata = self.curdata \
+                            + u'|%s|' %(alt,)
+                else:
+                    self.images[alt] = {"url": url}
+                    self.curdata = self.curdata \
+                        + u'|%s|' %(alt,)
+            else:
+                if self.images.has_key(url):
+                    self.curdata = self.curdata \
+                        + u'|%s|' %(url,)
+                else:
+                    self.images[url] = {}
+                    self.images[url]["url"] =url
+                    self.curdata = self.curdata \
+                        + u'|%s|' %(url,)
+
     def handle_curdata(self):
+        if len(self.opentags) == 0:
+            return
+
+        tag_thats_done = self.opentags[-1]
+
         if len(self.curdata) == 0:
             return
 
-        if len(self.curdata.strip()) == 0:
+        if tag_thats_done == u'br':
+            if len(self.text) == 0 or self.text[-1] != '\n':
+                self.text = self.text + '\n'
+            self.ignorenodata = True
             return
 
-        tag_thats_done = self.opentags[-1]
+        if len(self.curdata.strip()) == 0:
+            return
 
         if tag_thats_done in self.blockleveltags:
             newlinerequired = self.text != u''
+            if self.ignorenodata:
+                newlinerequired = False
+                self.ignorenodata = False
             if newlinerequired:
-                if newlinerequired \
-                and len(self.text) > 2 \
+                if tag_thats_done in [u'dt', u'dd', u'li'] \
+                    and len(self.text) > 1 \
+                    and self.text[-1] != u'\n':
+                    self.text = self.text + u'\n'
+                elif len(self.text) > 2 \
                 and self.text[-1] != u'\n' \
                 and self.text[-2] != u'\n':
                     self.text = self.text + u'\n\n'
@@ -179,7 +379,7 @@ class HTML2Text(HTMLParser):
         if tag_thats_done in ["h1", "h2", "h3", "h4", "h5", "h6"]:
             underline = u''
             underlinechar = u'='
-            headingtext = self.curdata.encode("utf-8").strip()
+            headingtext = " ".join(self.curdata.split())
             seperator = u'\n' + u' '*self.indentlevel
             headingtext = seperator.join( \
                 textwrap.wrap( \
@@ -200,35 +400,45 @@ class HTML2Text(HTMLParser):
             underline = u' ' * self.indentlevel \
                 + underlinechar * len(headingtext)
             self.text = self.text \
-                + headingtext.encode("utf-8") + u'\n' \
+                + headingtext + u'\n' \
                 + underline
-        elif tag_thats_done == u'p':
-            paragraph = self.curdata.encode("utf-8").strip()
+        elif tag_thats_done in [u'p', u'div']:
+            paragraph = unicode( \
+                " ".join(self.curdata.strip().encode("utf-8").split()), \
+                "utf-8")
             seperator = u'\n' + u' ' * self.indentlevel
             self.text = self.text \
                 + u' ' * self.indentlevel \
-                + seperator.join(textwrap.wrap(paragraph, self.textwidth - self.indentlevel))
+                + seperator.join( \
+                    textwrap.wrap( \
+                        paragraph, self.textwidth - self.indentlevel))
         elif tag_thats_done == "pre":
-            self.text = self.text + self.curdata
-        elif tag_thats_done == "blockquote":
-            quote = self.curdata.encode("utf-8").strip()
-            seperator = u'\n' + u' ' * self.indentlevel + u'> '
+            self.text = self.text + unicode( \
+                self.curdata.encode("utf-8"), "utf-8")
+        elif tag_thats_done == u'blockquote':
+            quote = unicode( \
+                " ".join(self.curdata.encode("utf-8").strip().split()), \
+                "utf-8")
+            seperator = u'\n' + u' ' * self.indentlevel + u' '
+            if len(self.text) > 0 and self.text[-1] != u'\n':
+                self.text = self.text + u'\n'
             self.text = self.text \
-                + u'> ' \
+                + u' ' \
                 + seperator.join( \
                     textwrap.wrap( \
                         quote, \
                         self.textwidth - self.indentlevel - 2 \
                         ) \
                     )
+            self.curdata = u''
         elif tag_thats_done == "li":
-            item = self.curdata.encode("utf-8").strip()
+            item = unicode(self.curdata.encode("utf-8").strip(), "utf-8")
             if len(self.text) > 0 and self.text[-1] != u'\n':
                 self.text = self.text + u'\n'
             # work out if we're in an ol rather than a ul
             latesttags = self.opentags[-4:]
             latesttags.reverse()
-            isul = False
+            isul = None
             for thing in latesttags:
                 if thing == 'ul':
                     isul = True
@@ -242,7 +452,7 @@ class HTML2Text(HTMLParser):
 
                 listindent = 4
                 listmarker = u' * '
-            if not isul:
+            if isul == False:
                 listmarker = u' %2d. ' %(self.listcount[-1])
                 self.listcount[-1] = self.listcount[-1] + 1
 
@@ -260,60 +470,67 @@ class HTML2Text(HTMLParser):
                     )
             self.curdata = u''
         elif tag_thats_done == u'dt':
-            definition = self.curdata.encode("utf-8").strip()
+            definition = unicode(" ".join( \
+                self.curdata.encode("utf-8").strip().split()), \
+                "utf-8")
             if len(self.text) > 0 and self.text[-1] != u'\n':
                 self.text = self.text + u'\n\n'
             elif len(self.text) > 1 and self.text[-2] != u'\n':
                 self.text = self.text + u'\n'
-            definition = u' ' * self.indentlevel + definition + "::"
-            indentstring = u'\n' + u' ' * (self.indentlevel + 1)
+            definition = u' ' * (self.indentlevel - 4) + definition + "::"
+            indentstring = u'\n' + u' ' * (self.indentlevel - 3)
             self.text = self.text \
                 + indentstring.join(
                     textwrap.wrap(definition, \
-                        self.textwidth - self.indentlevel - 1))
+                        self.textwidth - self.indentlevel - 4))
             self.curdata = u''
         elif tag_thats_done == u'dd':
-            definition = self.curdata.encode("utf-8").strip()
+            definition = unicode(" ".join( \
+                self.curdata.encode("utf-8").strip().split()),
+                "utf-8")
             if len(definition) > 0:
                 if len(self.text) > 0 and self.text[-1] != u'\n':
                     self.text = self.text + u'\n'
-                indentstring = u'\n' + u' ' * (self.indentlevel + 4)
+                indentstring = u'\n' + u' ' * self.indentlevel
                 self.text = self.text \
-                    + u' ' * (self.indentlevel + 4) \
+                    + indentstring \
                     + indentstring.join( \
                         textwrap.wrap( \
                             definition, \
-                            self.textwidth - self.indentlevel - 4 \
+                            self.textwidth - self.indentlevel \
                            ) \
                         )
                 self.curdata = u''
+        elif tag_thats_done == u'a':
+            self.curdata = self.curdata + u'`__'
+            pass
         elif tag_thats_done in self.liststarttags:
             pass
-        else:
-            # we've got no idea what this tag does, so we'll
-            # make an assumption that we're not going to know later
-            if len(self.curdata) > 0:
-                self.text = self.text \
-                    + u' ... ' \
-                    + u'\n ... '.join( \
-                        textwrap.wrap(self.curdata, self.textwidth - 5))
-            self.curdata = u''
 
         if tag_thats_done in self.blockleveltags:
             self.curdata = u''
+        self.ignorenodata = False
 
     def handle_endtag(self, tag):
+        self.ignorenodata = False
+        if tag == "span":
+            return
+
         try:
             tagindex = self.opentags.index(tag)
         except:
-            # closing tag we know nothing about.
-            # err. weird.
-            tagindex = 0
-
+            return
         tag = tag.lower()
 
+        if tag in [u'br', u'img']:
+            return
+
+        if tag == u'dl':
+            self.indentlevel = self.indentlevel - 4
+
         if tag in self.liststarttags:
-            if tag in [u'ol', u'dl', u'ul']:
+            if tag in [u'ol', u'dl', u'ul', u'dd']:
                 self.handle_curdata()
                 # find if there was a previous list level
                 smalllist = self.opentags[:-1]
@@ -347,18 +564,33 @@ class HTML2Text(HTMLParser):
         self.opentags.pop()
 
     def handle_data(self, data):
-        self.curdata = self.curdata + unicode(data, "utf-8")
+        if len(self.opentags) == 0:
+            self.opentags.append(u'p')
+        self.curdata = self.curdata + data.decode("utf-8")
+
+    def handle_charref(self, name):
+        try:
+            entity = unichr(int(name))
+        except:
+            if name[0] == 'x':
+                try:
+                    entity = unichr(int('0%s' %(name,), 16))
+                except:
+                    entity = u'#%s' %(name,)
+            else:
+                entity = u'#%s' %(name,)
+        self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
+            "utf-8")
 
     def handle_entityref(self, name):
         entity = name
-        if HTML2Text.entities.has_key(name.lower()):
-            entity = HTML2Text.entities[name.lower()]
-        elif name[0] == "#":
-            entity = unichr(int(name[1:]))
+        if HTML2Text.entities.has_key(name):
+            entity = HTML2Text.entities[name]
         else:
             entity = "&" + name + ";"
-        self.curdata = self.curdata + unicode(entity, "utf-8")
+        self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
+            "utf-8")
 
     def gettext(self):
         self.handle_curdata()
@@ -369,6 +601,15 @@ class HTML2Text(HTMLParser):
         while len(self.text) > 1 and self.text[-1] == u'\n':
             self.text = self.text[:-1]
         self.text = self.text + u'\n'
+        if len(self.urls) > 0:
+            self.text = self.text + u'\n__ ' + u'\n__ '.join(self.urls) + u'\n'
+            self.urls = []
+        if len(self.images.keys()) > 0:
+            self.text = self.text + u'\n.. ' \
+                + u'\n.. '.join( \
+                    ["|%s| image:: %s" %(a, self.images[a]["url"]) \
+                        for a in self.images.keys()]) + u'\n'
+            self.images = {}
         return self.text
 
 def open_url(method, url):
@@ -377,10 +618,17 @@
     (type, rest) = urllib.splittype(url)
     (host, path) = urllib.splithost(rest)
     (host, port) = urllib.splitport(host)
-    if port == None:
+    if type == "https":
+        if port == None:
+            port = 443
+    elif port == None:
         port = 80
     try:
-        conn = httplib.HTTPConnection("%s:%s" %(host, port))
+        conn = None
+        if type == "http":
+            conn = httplib.HTTPConnection("%s:%s" %(host, port))
+        else:
+            conn = httplib.HTTPSConnection("%s:%s" %(host, port))
         conn.request(method, path)
         response = conn.getresponse()
         if response.status in [301, 302, 303, 307]:
@@ -452,23 +700,30 @@ def parse_and_deliver(maildir, url, statedir):
         if item.has_key("content"):
             content = item["content"][0]["value"]
         else:
-            content = item["summary"]
+            if item.has_key("description"):
+                content = item["description"]
+            else:
+                content = u''
 
         md5sum = md5.md5(content.encode("utf-8")).hexdigest()
 
         prevmessageid = None
 
+        db_guid_key = None
+        db_link_key = (url + u'|' + item["link"]).encode("utf-8")
+
         # check if there's a guid too - if that exists and we match the md5,
         # return
         if item.has_key("guid"):
-            if db.has_key(url + "|" + item["guid"]):
-                data = db[url + "|" + item["guid"]]
+            db_guid_key = (url + u'|' + item["guid"]).encode("utf-8")
+            if db.has_key(db_guid_key):
+                data = db[db_guid_key]
                 data = cgi.parse_qs(data)
                 if data["contentmd5"][0] == md5sum:
                     continue
 
-        if db.has_key(url + "|" + item["link"]):
-            data = db[url + "|" + item["link"]]
+        if db.has_key(db_link_key):
+            data = db[db_link_key]
             data = cgi.parse_qs(data)
             if data.has_key("message-id"):
                 prevmessageid = data["message-id"][0]
@@ -492,8 +747,8 @@ def parse_and_deliver(maildir, url, statedir):
             ]) + "@" + socket.gethostname() + ">"
         msg.add_header("Message-ID", messageid)
         msg.set_unixfrom("\"%s\" " %(url))
-        msg.add_header("From", "\"%s\" " %(author))
-        msg.add_header("To", "\"%s\" " %(url))
+        msg.add_header("From", "\"%s\" " %(author.encode("utf-8")))
+        msg.add_header("To", "\"%s\" " %(url.encode("utf-8")))
         if prevmessageid:
             msg.add_header("References", prevmessageid)
         createddate = datetime.datetime.now() \
@@ -504,7 +759,14 @@ def parse_and_deliver(maildir, url, statedir):
         except:
             pass
         msg.add_header("Date", createddate)
-        msg.add_header("Subject", item["title"])
+        msg.add_header("X-rss2maildir-rundate", datetime.datetime.now() \
+            .strftime("%a, %e %b %Y %T -0000"))
+        subj_gen = HTML2Text()
+        title = item["title"]
+        title = re.sub(u'<', u'&lt;', title)
+        title = re.sub(u'>', u'&gt;', title)
+        subj_gen.feed(title.encode("utf-8"))
+        msg.add_header("Subject", subj_gen.gettext())
         msg.set_default_type("text/plain")
 
         htmlcontent = content.encode("utf-8")
@@ -551,30 +813,31 @@ def parse_and_deliver(maildir, url, statedir):
                 ("created", createddate), \
                 ("contentmd5", md5sum) \
                 ))
-            db[url + "|" + item["guid"]] = data
+            db[db_guid_key] = data
             try:
-                data = db[url + "|" + item["link"]]
+                data = db[db_link_key]
                 data = cgi.parse_qs(data)
                 newdata = urllib.urlencode(( \
                     ("message-id", messageid), \
                    ("created", data["created"][0]), \
                     ("contentmd5", data["contentmd5"][0]) \
                     ))
-                db[url + "|" + item["link"]] = newdata
+                db[db_link_key] = newdata
            except:
-                db[url + "|" + item["link"]] = data
+                db[db_link_key] = data
        else:
             data = urllib.urlencode(( \
                 ("message-id", messageid), \
                 ("created", createddate), \
                 ("contentmd5", md5sum) \
                 ))
-            db[url + "|" + item["link"]] = data
+            db[db_link_key] = data
 
     if headers:
         data = []
         for header in headers:
-            if header[0] in ["content-md5", "etag", "last-modified", "content-length"]:
+            if header[0] in \
+                ["content-md5", "etag", "last-modified", "content-length"]:
                data.append((header[0], header[1]))
         if len(data) > 0:
             data = urllib.urlencode(data)
@@ -653,11 +916,13 @@ if __name__ == "__main__":
     elif scp.has_option("general", "state_dir"):
         new_state_dir = scp.get("general", "state_dir")
         try:
-            mode = os.stat(state_dir)[stat.ST_MODE]
+            mode = os.stat(new_state_dir)[stat.ST_MODE]
             if not stat.S_ISDIR(mode):
                 sys.stderr.write( \
                     "State directory (%s) is not a directory\n" %(state_dir))
                 sys.exit(1)
+            else:
+                state_dir = new_state_dir
         except:
            # try to create it
            try:
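
Usage sketch (not part of the commit above): the reworked HTML2Text class is driven the same way parse_and_deliver() now drives it for the Subject header - feed it a UTF-8 byte string, then read the converted text back with gettext(). A minimal, hypothetical example follows, assuming Python 2 and that rss2maildir.py is importable as a module with its dependencies installed; the sample markup and URL are invented:

    from rss2maildir import HTML2Text

    # feed() is the stock HTMLParser entry point; gettext() is extended by
    # this diff to append reST-style link targets ("__ <url>" lines) and
    # "|alt| image:: <url>" substitution definitions after the text.
    snippet = u'<p>See the <a href="http://www.example.com/">example page</a> &amp; enjoy.</p>'

    converter = HTML2Text(textwidth=70)      # textwidth is the wrap column, default 70
    converter.feed(snippet.encode("utf-8"))  # the handlers decode input as UTF-8
    print converter.gettext()                # unicode: wrapped text with `example page`__
                                             # plus a trailing "__ http://..." target line

Note that gettext() leaves self.text in place rather than resetting it, which is why parse_and_deliver() builds a fresh instance (subj_gen = HTML2Text()) for each subject it converts; reusing one converter across items would concatenate their output.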