Change header encoding for From/To address to make sure they're utf-8 and so they...
index 113d931a1db7bf162f603ce1cc646137a1f97b9c..8a59c8548761e87c06c1befe66a9f3550cf4b07b 100755 (executable)
@@ -39,23 +39,143 @@ from optparse import OptionParser
 from ConfigParser import SafeConfigParser
 
 from base64 import b64encode
-import md5
+
+if sys.version_info[0] == 2 and sys.version_info[1] >= 6:
+    import hashlib as md5
+else:
+    import md5
 
 import cgi
 import dbm
 
+import re
+
 from HTMLParser import HTMLParser
 
 class HTML2Text(HTMLParser):
     entities = {
-        u'amp': "&",
-        u'lt': "<",
-        u'gt': ">",
-        u'pound': "£",
-        u'copy': "©",
-        u'apos': "'",
-        u'quot': "\"",
-        u'nbsp': " ",
+        u'amp': u'&',
+        u'lt': u'<',
+        u'gt': u'>',
+        u'pound': u'£',
+        u'copy': u'©',
+        u'apos': u'\'',
+        u'quot': u'"',
+        u'nbsp': u' ',
+        u'ldquo': u'“',
+        u'rdquo': u'”',
+        u'lsquo': u'‘',
+        u'rsquo': u'’',
+        u'laquo': u'«',
+        u'raquo': u'»',
+        u'lsaquo': u'‹',
+        u'rsaquo': u'›',
+        u'bull': u'•',
+        u'middot': u'·',
+        u'deg': u'°',
+        u'hellip': u'…',
+        u'trade': u'™',
+        u'reg': u'®',
+        u'agrave': u'à',
+        u'Agrave': u'À',
+        u'egrave': u'è',
+        u'Egrave': u'È',
+        u'igrave': u'ì',
+        u'Igrave': u'Ì',
+        u'ograve': u'ò',
+        u'Ograve': u'Ò',
+        u'ugrave': u'ù',
+        u'Ugrave': u'Ù',
+        u'aacute': u'á',
+        u'Aacute': u'Á',
+        u'eacute': u'é',
+        u'Eacute': u'É',
+        u'iacute': u'í',
+        u'Iacute': u'Í',
+        u'oacute': u'ó',
+        u'Oacute': u'Ó',
+        u'uacute': u'ú',
+        u'Uacute': u'Ú',
+        u'yacute': u'ý',
+        u'Yacute': u'Ý',
+        u'acirc': u'â',
+        u'Acirc': u'Â',
+        u'ecirc': u'ê',
+        u'Ecirc': u'Ê',
+        u'icirc': u'î',
+        u'Icirc': u'Î',
+        u'ocirc': u'ô',
+        u'Ocirc': u'Ô',
+        u'ucirc': u'û',
+        u'Ucirc': u'Û',
+        u'atilde': u'ã',
+        u'Atilde': u'Ã',
+        u'ntilde': u'ñ',
+        u'Ntilde': u'Ñ',
+        u'otilde': u'õ',
+        u'Otilde': u'Õ',
+        u'auml': u'ä',
+        u'Auml': u'Ä',
+        u'euml': u'ë',
+        u'Euml': u'Ë',
+        u'iuml': u'ï',
+        u'Iuml': u'Ï',
+        u'ouml': u'ö',
+        u'Ouml': u'Ö',
+        u'uuml': u'ü',
+        u'Uuml': u'Ü',
+        u'yuml': u'ÿ',
+        u'Yuml': u'Ÿ',
+        u'iexcl': u'¡',
+        u'iquest': u'¿',
+        u'ccedil': u'ç',
+        u'Ccedil': u'Ç',
+        u'oelig': u'œ',
+        u'OElig': u'Œ',
+        u'szlig': u'ß',
+        u'oslash': u'ø',
+        u'Oslash': u'Ø',
+        u'aring': u'å',
+        u'Aring': u'Å',
+        u'aelig': u'æ',
+        u'AElig': u'Æ',
+        u'thorn': u'þ',
+        u'THORN': u'Þ',
+        u'eth': u'ð',
+        u'ETH': u'Ð',
+        u'mdash': u'—',
+        u'ndash': u'–',
+        u'sect': u'§',
+        u'para': u'¶',
+        u'uarr': u'↑',
+        u'darr': u'↓',
+        u'larr': u'←',
+        u'rarr': u'→',
+        u'dagger': u'†',
+        u'Dagger': u'‡',
+        u'permil': u'‰',
+        u'prod': u'∏',
+        u'infin': u'∞',
+        u'radic': u'√',
+        u'there4': u'∴',
+        u'int': u'∫',
+        u'asymp': u'≈',
+        u'ne': u'≠',
+        u'equiv': u'≡',
+        u'le': u'≤',
+        u'ge': u'≥',
+        u'loz': u'◊',
+        u'sum': u'∑',
+        u'part': u'∂',
+        u'prime': u'′',
+        u'Prime': u'″',
+        u'harr': u'↔',
+        u'micro': u'µ',
+        u'not': u'¬',
+        u'plusmn': u'±',
+        u'divide': u'÷',
+        u'cent': u'¢',
+        u'euro': u'€',
         }
 
     blockleveltags = [
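
The import hunk above drops the long-deprecated md5 module in favour of hashlib on Python 2.6 and later, keeping the old module as a fallback. A minimal sketch of the same idea as a standalone helper; the name content_digest is illustrative and not part of rss2maildir:

    try:
        import hashlib
        def content_digest(text):
            # hexdigest of the UTF-8 encoded item body, as stored in the
            # per-item "contentmd5" state value
            return hashlib.md5(text.encode("utf-8")).hexdigest()
    except ImportError:
        # interpreters without hashlib fall back to the legacy md5 module
        import md5
        def content_digest(text):
            return md5.md5(text.encode("utf-8")).hexdigest()
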
@@ -70,8 +190,11 @@ class HTML2Text(HTMLParser):
         u'ul',
         u'ol',
         u'dl',
+        u'li',
+        u'dt',
+        u'dd',
         u'div',
-        #u'blockquote',
+        u'blockquote',
         ]
 
     liststarttags = [
@@ -96,6 +219,7 @@ class HTML2Text(HTMLParser):
         self.ignorenodata = False
         self.listcount = []
         self.urls = []
+        self.images = {}
         HTMLParser.__init__(self)
 
     def handle_starttag(self, tag, attrs):
@@ -112,6 +236,9 @@ class HTML2Text(HTMLParser):
                 self.listcount.append(1)
                 self.listlevel = len(self.listcount) - 1
 
+            if tag_name == u'dl':
+                self.indentlevel = self.indentlevel + 4
+
             if tag_name in self.liststarttags:
                 smallist = self.opentags[-3:-1]
                 smallist.reverse()
@@ -148,7 +275,7 @@ class HTML2Text(HTMLParser):
             elif tag_name == u'a':
                 for attr in attrs:
                     if attr[0].lower() == u'href':
-                        self.urls.append(attr[1])
+                        self.urls.append(attr[1].decode('utf-8'))
                 self.curdata = self.curdata + u'`'
                 self.opentags.append(tag_name)
                 return
@@ -184,16 +311,30 @@ class HTML2Text(HTMLParser):
             elif attr[0] == 'src':
                 url = attr[1].decode('utf-8')
         if url:
-            self.curdata = self.curdata \
-                + u' [img:' \
-                + url
             if alt:
-                self.curdata = self.curdata \
-                    + u'(' \
-                    + alt \
-                    + u')'
-            self.curdata = self.curdata \
-                + u']'
+                if self.images.has_key(alt):
+                    if self.images[alt]["url"] == url:
+                        self.curdata = self.curdata \
+                            + u'|%s|' %(alt,)
+                    else:
+                        while self.images.has_key(alt):
+                            alt = alt + "_"
+                        self.images[alt] = {"url": url}
+                        self.curdata = self.curdata \
+                            + u'|%s|' %(alt,)
+                else:
+                    self.images[alt] = {"url": url}
+                    self.curdata = self.curdata \
+                        + u'|%s|' %(alt,)
+            else:
+                if self.images.has_key(url):
+                    self.curdata = self.curdata \
+                        + u'|%s|' %(url,)
+                else:
+                    self.images[url] = {}
+                    self.images[url]["url"] = url
+                    self.curdata = self.curdata \
+                        + u'|%s|' %(url,)
 
     def handle_curdata(self):
 
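
handle_img() now records every image it sees under its alt text, falling back to the URL when there is no alt, so the body can reference the image as a reStructuredText-style substitution (|name|) and the definitions can be written out later by gettext(). Duplicate alt texts are disambiguated with trailing underscores. A sketch of that bookkeeping pulled out into a hypothetical register_image() helper:

    def register_image(images, alt, url):
        # reuse the existing name if it already points at the same URL
        if alt in images and images[alt].get("url") == url:
            return alt
        # otherwise pick a substitution name that does not clash
        while alt in images:
            alt = alt + u'_'
        images[alt] = {"url": url}
        return alt

    images = {}
    register_image(images, u'logo', u'http://example.org/a.png')   # -> u'logo'
    register_image(images, u'logo', u'http://example.org/b.png')   # -> u'logo_'
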
@@ -219,17 +360,20 @@ class HTML2Text(HTMLParser):
             if self.ignorenodata:
                 newlinerequired = False
             self.ignorenodata = False
-            if newlinerequired \
-                and len(self.text) > 2 \
-                and self.text[-1] != u'\n' \
-                and self.text[-2] != u'\n':
+            if newlinerequired:
+                if tag_thats_done in [u'dt', u'dd', u'li'] \
+                    and len(self.text) > 1 \
+                    and self.text[-1] != u'\n':
+                        self.text = self.text + u'\n'
+                elif len(self.text) > 2 \
+                    and self.text[-1] != u'\n' \
+                    and self.text[-2] != u'\n':
                     self.text = self.text + u'\n\n'
 
         if tag_thats_done in ["h1", "h2", "h3", "h4", "h5", "h6"]:
             underline = u''
             underlinechar = u'='
-            headingtext = unicode( \
-                self.curdata.encode("utf-8").strip(), "utf-8")
+            headingtext = " ".join(self.curdata.split())
             seperator = u'\n' + u' '*self.indentlevel
             headingtext = seperator.join( \
                 textwrap.wrap( \
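
Headings are now normalised with " ".join(self.curdata.split()) instead of an encode/decode round trip; the split/join idiom collapses newlines and runs of whitespace left over from the HTML before textwrap re-wraps the heading and handle_curdata() underlines it. Roughly:

    import textwrap

    raw = u'A  heading\n    split over\n several lines'
    collapsed = u' '.join(raw.split())   # u'A heading split over several lines'
    wrapped = textwrap.wrap(collapsed, 70)
    underline = u'=' * len(wrapped[0])   # simple underline for a one-line heading
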
@@ -250,11 +394,12 @@ class HTML2Text(HTMLParser):
                 underline = u' ' * self.indentlevel \
                     + underlinechar * len(headingtext)
             self.text = self.text \
-                + headingtext.encode("utf-8") + u'\n' \
+                + headingtext + u'\n' \
                 + underline
         elif tag_thats_done in [u'p', u'div']:
             paragraph = unicode( \
-                self.curdata.strip().encode("utf-8"), "utf-8")
+                " ".join(self.curdata.strip().encode("utf-8").split()), \
+                "utf-8")
             seperator = u'\n' + u' ' * self.indentlevel
             self.text = self.text \
                 + u' ' * self.indentlevel \
@@ -266,12 +411,13 @@ class HTML2Text(HTMLParser):
                 self.curdata.encode("utf-8"), "utf-8")
         elif tag_thats_done == u'blockquote':
             quote = unicode( \
-                self.curdata.encode("utf-8").strip(), "utf-8")
-            seperator = u'\n' + u' ' * self.indentlevel + u'> '
+                " ".join(self.curdata.encode("utf-8").strip().split()), \
+                "utf-8")
+            seperator = u'\n' + u' ' * self.indentlevel + u'    '
             if len(self.text) > 0 and self.text[-1] != u'\n':
                 self.text = self.text + u'\n'
             self.text = self.text \
-                + u'> ' \
+                + u'    ' \
                 + seperator.join( \
                     textwrap.wrap( \
                         quote, \
@@ -318,30 +464,34 @@ class HTML2Text(HTMLParser):
                 )
             self.curdata = u''
         elif tag_thats_done == u'dt':
-            definition = unicode(self.curdata.encode("utf-8").strip(), "utf-8")
+            definition = unicode(" ".join( \
+                    self.curdata.encode("utf-8").strip().split()), \
+                "utf-8")
             if len(self.text) > 0 and self.text[-1] != u'\n':
                 self.text = self.text + u'\n\n'
             elif len(self.text) > 1 and self.text[-2] != u'\n':
                 self.text = self.text + u'\n'
-            definition = u' ' * self.indentlevel + definition + "::"
-            indentstring = u'\n' + u' ' * (self.indentlevel + 1)
+            definition = u' ' * (self.indentlevel - 4) + definition + "::"
+            indentstring = u'\n' + u' ' * (self.indentlevel - 3)
             self.text = self.text \
                 + indentstring.join(
                     textwrap.wrap(definition, \
-                        self.textwidth - self.indentlevel - 1))
+                        self.textwidth - self.indentlevel - 4))
             self.curdata = u''
         elif tag_thats_done == u'dd':
-            definition = unicode(self.curdata.encode("utf-8").strip(), "utf-8")
+            definition = unicode(" ".join( \
+                    self.curdata.encode("utf-8").strip().split()),
+                "utf-8")
             if len(definition) > 0:
                 if len(self.text) > 0 and self.text[-1] != u'\n':
                     self.text = self.text + u'\n'
-                indentstring = u'\n' + u' ' * (self.indentlevel + 4)
+                indentstring = u'\n' + u' ' * self.indentlevel
                 self.text = self.text \
-                    + u' ' * (self.indentlevel + 4) \
+                    + indentstring \
                     + indentstring.join( \
                         textwrap.wrap( \
                             definition, \
-                            self.textwidth - self.indentlevel - 4 \
+                            self.textwidth - self.indentlevel \
                             ) \
                         )
                 self.curdata = u''
@@ -370,8 +520,11 @@ class HTML2Text(HTMLParser):
         if tag in [u'br', u'img']:
             return
 
+        if tag == u'dl':
+            self.indentlevel = self.indentlevel - 4
+
         if tag in self.liststarttags:
-            if tag in [u'ol', u'dl', u'ul']:
+            if tag in [u'ol', u'dl', u'ul', u'dd']:
                 self.handle_curdata()
                 # find if there was a previous list level
                 smalllist = self.opentags[:-1]
@@ -407,18 +560,31 @@ class HTML2Text(HTMLParser):
     def handle_data(self, data):
         if len(self.opentags) == 0:
             self.opentags.append(u'p')
-        self.curdata = self.curdata + unicode(data, "utf-8")
+        self.curdata = self.curdata + data.decode("utf-8")
+
+    def handle_charref(self, name):
+        try:
+            entity = unichr(int(name))
+        except:
+            if name[0] == 'x':
+                try:
+                    entity = unichr(int('0%s' %(name,), 16))
+                except:
+                    entity = u'#%s' %(name,)
+            else:
+                entity = u'#%s' %(name,)
+        self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
+            "utf-8")
 
     def handle_entityref(self, name):
         entity = name
-        if HTML2Text.entities.has_key(name.lower()):
-            entity = HTML2Text.entities[name.lower()]
-        elif name[0] == "#":
-            entity = unichr(int(name[1:]))
+        if HTML2Text.entities.has_key(name):
+            entity = HTML2Text.entities[name]
         else:
             entity = "&" + name + ";"
 
-        self.curdata = self.curdata + unicode(entity, "utf-8")
+        self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
+            "utf-8")
 
     def gettext(self):
         self.handle_curdata()
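
The new handle_charref() turns numeric character references such as &#8217; or &#x2019; into the corresponding Unicode character, keeping a literal #name when the number cannot be parsed, and handle_entityref() now looks names up case-sensitively in the enlarged entities table. A compact sketch of the numeric case, assuming the same decimal-then-hexadecimal behaviour (the function name is hypothetical):

    def charref_to_unicode(name):
        # name arrives without the leading '#', e.g. u'8217' or u'x2019'
        try:
            return unichr(int(name))                 # decimal reference
        except ValueError:
            if name.startswith(u'x'):
                try:
                    return unichr(int(name[1:], 16)) # hexadecimal reference
                except ValueError:
                    pass
            return u'#%s' % (name,)                  # give up, keep it visible

    charref_to_unicode(u'8217')    # -> u'\u2019' (right single quote)
    charref_to_unicode(u'x2019')   # -> u'\u2019'
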
@@ -432,6 +598,12 @@ class HTML2Text(HTMLParser):
         if len(self.urls) > 0:
             self.text = self.text + u'\n__ ' + u'\n__ '.join(self.urls) + u'\n'
             self.urls = []
+        if len(self.images.keys()) > 0:
+            self.text = self.text + u'\n.. ' \
+                + u'\n.. '.join( \
+                    ["|%s| image:: %s" %(a, self.images[a]["url"]) \
+                for a in self.images.keys()]) + u'\n'
+            self.images = {}
         return self.text
 
 def open_url(method, url):
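
gettext() finishes the plain-text body with two trailers: one '__ <url>' line per collected hyperlink, and, new in this hunk, one '.. |name| image:: <url>' substitution definition per image recorded by handle_img(). A rough illustration of the image trailer for a small, hypothetical images dict (output order follows dict iteration):

    images = {
        u'logo':  {"url": u'http://example.org/a.png'},
        u'logo_': {"url": u'http://example.org/b.png'},
    }
    trailer = u'\n.. ' + u'\n.. '.join(
        [u'|%s| image:: %s' % (name, images[name]["url"]) for name in images]) + u'\n'
    # .. |logo| image:: http://example.org/a.png
    # .. |logo_| image:: http://example.org/b.png
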
@@ -440,10 +612,17 @@ def open_url(method, url):
         (type, rest) = urllib.splittype(url)
         (host, path) = urllib.splithost(rest)
         (host, port) = urllib.splitport(host)
-        if port == None:
+        if type == "https":
+            if port == None:
+                port = 443
+        elif port == None:
             port = 80
         try:
-            conn = httplib.HTTPConnection("%s:%s" %(host, port))
+            conn = None
+            if type == "http":
+                conn = httplib.HTTPConnection("%s:%s" %(host, port))
+            else:
+                conn = httplib.HTTPSConnection("%s:%s" %(host, port))
             conn.request(method, path)
             response = conn.getresponse()
             if response.status in [301, 302, 303, 307]:
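
open_url() now understands https URLs: the default port becomes 443 and an HTTPSConnection is opened instead of an HTTPConnection. A minimal sketch of just the connection selection, assuming Python 2's httplib was built with SSL support (the connect() helper is illustrative):

    import httplib
    import urllib

    def connect(url):
        scheme, rest = urllib.splittype(url)
        host, path = urllib.splithost(rest)
        host, port = urllib.splitport(host)
        if scheme == "https":
            return httplib.HTTPSConnection("%s:%s" % (host, port or 443)), path
        return httplib.HTTPConnection("%s:%s" % (host, port or 80)), path

    conn, path = connect("https://example.org/feed.xml")
    conn.request("GET", path)
    response = conn.getresponse()
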
@@ -515,23 +694,30 @@ def parse_and_deliver(maildir, url, statedir):
         if item.has_key("content"):
             content = item["content"][0]["value"]
         else:
-            content = item["summary"]
+            if item.has_key("description"):
+                content = item["description"]
+            else:
+                content = u''
 
         md5sum = md5.md5(content.encode("utf-8")).hexdigest()
 
         prevmessageid = None
 
+        db_guid_key = None
+        db_link_key = (url + u'|' + item["link"]).encode("utf-8")
+
         # check if there's a guid too - if that exists and we match the md5,
         # return
         if item.has_key("guid"):
-            if db.has_key(url + "|" + item["guid"]):
-                data = db[url + "|" + item["guid"]]
+            db_guid_key = (url + u'|' + item["guid"]).encode("utf-8")
+            if db.has_key(db_guid_key):
+                data = db[db_guid_key]
                 data = cgi.parse_qs(data)
                 if data["contentmd5"][0] == md5sum:
                     continue
 
-        if db.has_key(url + "|" + item["link"]):
-            data = db[url + "|" + item["link"]]
+        if db.has_key(db_link_key):
+            data = db[db_link_key]
             data = cgi.parse_qs(data)
             if data.has_key("message-id"):
                 prevmessageid = data["message-id"][0]
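
The guid and link state keys are now built once and explicitly UTF-8 encoded, since dbm keys must be byte strings and feed guids or links can contain non-ASCII characters. Roughly, with hypothetical values:

    import urllib
    import cgi

    url = u'http://example.org/feed.xml'
    guid = u'http://example.org/post/caf\xe9'
    db_guid_key = (url + u'|' + guid).encode("utf-8")   # dbm wants bytes, not unicode

    # values stay urlencoded query strings, read back with cgi.parse_qs()
    data = urllib.urlencode((("message-id", "<x@localhost>"),
                             ("created", "Mon, 01 Jan 2018 00:00:00 -0000"),
                             ("contentmd5", "d41d8cd98f00b204e9800998ecf8427e")))
    assert cgi.parse_qs(data)["contentmd5"][0] == "d41d8cd98f00b204e9800998ecf8427e"
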
@@ -555,8 +741,8 @@ def parse_and_deliver(maildir, url, statedir):
                 ]) + "@" + socket.gethostname() + ">"
         msg.add_header("Message-ID", messageid)
         msg.set_unixfrom("\"%s\" <rss2maildir@localhost>" %(url))
-        msg.add_header("From", "\"%s\" <rss2maildir@localhost>" %(author))
-        msg.add_header("To", "\"%s\" <rss2maildir@localhost>" %(url))
+        msg.add_header("From", "\"%s\" <rss2maildir@localhost>" %(author.encode("utf-8")))
+        msg.add_header("To", "\"%s\" <rss2maildir@localhost>" %(url.encode("utf-8")))
         if prevmessageid:
             msg.add_header("References", prevmessageid)
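
This hunk is the change described in the truncated commit subject: the author and feed URL are UTF-8 encoded before being interpolated into the From and To headers, so the header values are plain byte strings rather than unicode. Note that this writes raw UTF-8 bytes into the headers; a stricter alternative, not what this patch does, would be RFC 2047 encoding via email.header.Header, sketched here with a made-up author:

    from email.header import Header

    author = u'S\xf8ren Nielsen'
    value = '"%s" <rss2maildir@localhost>' % Header(author, "utf-8").encode()
    # yields an encoded-word such as "=?utf-8?q?S=C3=B8ren_Nielsen?=" in place of the name
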
         createddate = datetime.datetime.now() \
@@ -567,7 +753,14 @@ def parse_and_deliver(maildir, url, statedir):
         except:
             pass
         msg.add_header("Date", createddate)
-        msg.add_header("Subject", item["title"])
+        msg.add_header("X-rss2maildir-rundate", datetime.datetime.now() \
+            .strftime("%a, %e %b %Y %T -0000"))
+        subj_gen = HTML2Text()
+        title = item["title"]
+        title = re.sub(u'<', u'&lt;', title)
+        title = re.sub(u'>', u'&gt;', title)
+        subj_gen.feed(title.encode("utf-8"))
+        msg.add_header("Subject", subj_gen.gettext())
         msg.set_default_type("text/plain")
 
         htmlcontent = content.encode("utf-8")
@@ -614,25 +807,25 @@ def parse_and_deliver(maildir, url, statedir):
                 ("created", createddate), \
                 ("contentmd5", md5sum) \
                 ))
-            db[url + "|" + item["guid"]] = data
+            db[db_guid_key] = data
             try:
-                data = db[url + "|" + item["link"]]
+                data = db[db_link_key]
                 data = cgi.parse_qs(data)
                 newdata = urllib.urlencode(( \
                     ("message-id", messageid), \
                     ("created", data["created"][0]), \
                     ("contentmd5", data["contentmd5"][0]) \
                     ))
-                db[url + "|" + item["link"]] = newdata
+                db[db_link_key] = newdata
             except:
-                db[url + "|" + item["link"]] = data
+                db[db_link_key] = data
         else:
             data = urllib.urlencode(( \
                 ("message-id", messageid), \
                 ("created", createddate), \
                 ("contentmd5", md5sum) \
                 ))
-            db[url + "|" + item["link"]] = data
+            db[db_link_key] = data
 
     if headers:
         data = []
@@ -717,11 +910,13 @@ if __name__ == "__main__":
     elif scp.has_option("general", "state_dir"):
         new_state_dir = scp.get("general", "state_dir")
         try:
-            mode = os.stat(state_dir)[stat.ST_MODE]
+            mode = os.stat(new_state_dir)[stat.ST_MODE]
             if not stat.S_ISDIR(mode):
                 sys.stderr.write( \
                     "State directory (%s) is not a directory\n" %(state_dir))
                 sys.exit(1)
+            else:
+                state_dir = new_state_dir
         except:
             # try to create it
             try:
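
The final hunk fixes a small bug: the existence check ran against the default state_dir instead of the newly configured new_state_dir, and the configured path was never assigned back to state_dir, so a valid state_dir setting was silently ignored. A reduced sketch of the intended check, with a hypothetical configured path:

    import os
    import stat
    import sys

    state_dir = ".rss2maildir"              # built-in default
    new_state_dir = "/var/lib/rss2maildir"  # value read from the config file

    try:
        mode = os.stat(new_state_dir)[stat.ST_MODE]
        if not stat.S_ISDIR(mode):
            sys.stderr.write(
                "State directory (%s) is not a directory\n" % (new_state_dir,))
            sys.exit(1)
        state_dir = new_state_dir           # only adopt the path once it checks out
    except OSError:
        pass                                # missing: fall through to the creation code
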