* Add item URL to HTML and text parts
diff --git a/rss2maildir.py b/rss2maildir.py
index 25ddb3c676cb8de3eb07d2502ed41d27be1087e0..97623535e64bc96bd118223d5c983c01a9a47e62 100755
--- a/rss2maildir.py
+++ b/rss2maildir.py
@@ -20,6 +20,7 @@
 import sys
 import os
 import stat
+import httplib
 import urllib
 
 import feedparser
@@ -52,12 +53,12 @@ entities = {
     "pound": "£",
     "copy": "©",
     "apos": "'",
-    "quote": "\"",
+    "quot": "\"",
     "nbsp": " ",
     }
 
 class HTML2Text(HTMLParser):
-    
+
     def __init__(self):
         self.inheadingone = False
         self.inheadingtwo = False
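
Reviewer note: the "quote" -> "quot" change fixes the lookup name for the standard HTML entity. Rather than growing this hand-maintained table one entity at a time, Python 2 ships the full name-to-codepoint map in htmlentitydefs; a minimal sketch of an equivalent lookup (the helper name is illustrative, not part of this patch):

import htmlentitydefs

def entity_to_text(name):
    # resolve a named entity via the stdlib table, leaving
    # unrecognised entities untouched
    codepoint = htmlentitydefs.name2codepoint.get(name)
    if codepoint is None:
        return "&%s;" % (name)
    return unichr(codepoint).encode("utf-8")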
@@ -251,8 +252,79 @@ class HTML2Text(HTMLParser):
             data = data + "\n".join(textwrap.wrap(self.currentparagraph, 70))
         return data
 
+def open_url(method, url):
+    redirectcount = 0
+    while redirectcount < 3:
+        (scheme, rest) = urllib.splittype(url)
+        (host, path) = urllib.splithost(rest)
+        (host, port) = urllib.splitport(host)
+        if port is None:
+            port = 80
+        try:
+            conn = httplib.HTTPConnection("%s:%s" %(host, port))
+            conn.request(method, path)
+            response = conn.getresponse()
+            if response.status in [301, 302, 303, 307]:
+                headers = response.getheaders()
+                for header in headers:
+                    if header[0] == "location":
+                        url = header[1]
+            elif response.status == 200:
+                return response
+        except Exception:
+            pass
+        redirectcount = redirectcount + 1
+    return None
+
 def parse_and_deliver(maildir, url, statedir):
-    fp = feedparser.parse(url)
+    feedhandle = None
+    headers = None
+    # first check if we know about this feed already
+    feeddb = dbm.open(os.path.join(statedir, "feeds"), "c")
+    if feeddb.has_key(url):
+        data = feeddb[url]
+        data = cgi.parse_qs(data)
+        response = open_url("HEAD", url)
+        headers = None
+        if response:
+            headers = response.getheaders()
+        ischanged = False
+        try:
+            for header in headers:
+                if header[0] == "content-length":
+                    if header[1] != data["content-length"][0]:
+                        ischanged = True
+                elif header[0] == "etag":
+                    if header[1] != data["etag"][0]:
+                        ischanged = True
+                elif header[0] == "last-modified":
+                    if header[1] != data["last-modified"][0]:
+                        ischanged = True
+                elif header[0] == "content-md5":
+                    if header[1] != data["content-md5"][0]:
+                        ischanged = True
+        except (KeyError, TypeError):
+            ischanged = True
+        if ischanged:
+            response = open_url("GET", url)
+            if response != None:
+                headers = response.getheaders()
+                feedhandle = response
+            else:
+                sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+                return
+        else:
+            return # don't need to do anything, nothing's changed.
+    else:
+        response = open_url("GET", url)
+        if response != None:
+            headers = response.getheaders()
+            feedhandle = response
+        else:
+            sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+            return
+
+    fp = feedparser.parse(feedhandle)
     db = dbm.open(os.path.join(statedir, "seen"), "c")
     for item in fp["items"]:
         # have we seen it before?
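
Reviewer note: open_url follows up to three redirects by hand, and parse_and_deliver then decides whether to refetch by comparing cached Content-Length/ETag/Last-Modified/Content-MD5 values against a fresh HEAD response. The same effect is usually achieved in a single round trip by letting the server do the comparison with a conditional GET; a minimal sketch in the same Python 2 httplib style (this is an alternative, not what the patch does, and the etag/lastmod arguments are assumed to come from the feeds db):

import httplib
import urllib

def conditional_get(url, etag=None, lastmod=None):
    (scheme, rest) = urllib.splittype(url)
    (host, path) = urllib.splithost(rest)
    conn = httplib.HTTPConnection(host)
    headers = {}
    if etag:
        headers["If-None-Match"] = etag
    if lastmod:
        headers["If-Modified-Since"] = lastmod
    conn.request("GET", path, None, headers)
    response = conn.getresponse()
    if response.status == 304:
        return None  # unchanged since the last fetch
    return response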
@@ -267,6 +339,15 @@ def parse_and_deliver(maildir, url, statedir):
 
         prevmessageid = None
 
+        # check if there's a guid too - if that exists and the md5 matches,
+        # we've already delivered this item, so skip it
+        if item.has_key("guid"):
+            if db.has_key(url + "|" + item["guid"]):
+                data = db[url + "|" + item["guid"]]
+                data = cgi.parse_qs(data)
+                if data["contentmd5"][0] == md5sum:
+                    continue
+
         if db.has_key(url + "|" + item["link"]):
             data = db[url + "|" + item["link"]]
             data = cgi.parse_qs(data)
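
Reviewer note: per-item state is stored as a flat urlencoded string keyed on "feedurl|guid" (and "feedurl|link"), so deduplication survives an item whose link changes while its guid stays stable. The encode/decode round trip looks like this (the values are made up for illustration):

import urllib
import cgi

record = urllib.urlencode((
    ("message-id", "<20080102030405.example@rss2maildir>"),
    ("created", "Wed,  2 Jan 2008 03:04:05 -0000"),
    ("contentmd5", "d41d8cd98f00b204e9800998ecf8427e"),
    ))
parsed = cgi.parse_qs(record)
# cgi.parse_qs returns a list per key, hence the [0] indexing above
assert parsed["contentmd5"][0] == "d41d8cd98f00b204e9800998ecf8427e"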
@@ -296,16 +377,28 @@ def parse_and_deliver(maildir, url, statedir):
         msg.add_header("To", "\"%s\" <rss2maildir@localhost>" %(url))
         if prevmessageid:
             msg.add_header("References", prevmessageid)
-        createddate = datetime.datetime(*item["updated_parsed"][0:6]) \
+        createddate = datetime.datetime.now() \
             .strftime("%a, %e %b %Y %T -0000")
+        try:
+            createddate = datetime.datetime(*item["updated_parsed"][0:6]) \
+                .strftime("%a, %e %b %Y %T -0000")
+        except (KeyError, TypeError):
+            pass
         msg.add_header("Date", createddate)
         msg.add_header("Subject", item["title"])
         msg.set_default_type("text/plain")
 
-        htmlpart = MIMEText(content.encode("utf-8"), "html", "utf-8")
+        htmlcontent = "%s\n\n<p>Item URL: <a href='%s'>%s</a></p>" %( \
+            content, \
+            item["link"], \
+            item["link"] )
+        htmlpart = MIMEText(htmlcontent.encode("utf-8"), "html", "utf-8")
         textparser = HTML2Text()
         textparser.feed(content.encode("utf-8"))
         textcontent = textparser.gettext()
+        textcontent = "%s\n\nItem URL: %s" %( \
+            textcontent, \
+            item["link"] )
         textpart = MIMEText(textcontent.encode("utf-8"), "plain", "utf-8")
         msg.attach(textpart)
         msg.attach(htmlpart)
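
Reviewer note: the strftime format "%a, %e %b %Y %T -0000" leans on %e and %T, which are glibc extensions rather than portable strftime directives. email.Utils.formatdate builds an RFC 2822 date without them; a sketch under the assumption that "updated_parsed" is the usual feedparser time tuple (the sample item below is a stand-in):

import calendar
import time
from email.Utils import formatdate

item = {"updated_parsed": time.gmtime()}  # stand-in for a feedparser entry
try:
    createddate = formatdate(
        calendar.timegm(item["updated_parsed"]), usegmt=True)
except (KeyError, TypeError):
    createddate = formatdate(usegmt=True)  # no usable date, fall back to now
print createddate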
@@ -332,14 +426,43 @@ def parse_and_deliver(maildir, url, statedir):
         # now add to the database about the item
         if prevmessageid:
             messageid = prevmessageid + " " + messageid
-        data = urllib.urlencode((
-            ("message-id", messageid), \
-            ("created", createddate), \
-            ("contentmd5", md5sum) \
-            ))
-        db[url + "|" + item["link"]] = data
+        if item.has_key("guid") and item["guid"] != item["link"]:
+            data = urllib.urlencode(( \
+                ("message-id", messageid), \
+                ("created", createddate), \
+                ("contentmd5", md5sum) \
+                ))
+            db[url + "|" + item["guid"]] = data
+            try:
+                data = db[url + "|" + item["link"]]
+                data = cgi.parse_qs(data)
+                newdata = urllib.urlencode(( \
+                    ("message-id", messageid), \
+                    ("created", data["created"][0]), \
+                    ("contentmd5", data["contentmd5"][0]) \
+                    ))
+                db[url + "|" + item["link"]] = newdata
+            except KeyError:
+                db[url + "|" + item["link"]] = data
+        else:
+            data = urllib.urlencode(( \
+                ("message-id", messageid), \
+                ("created", createddate), \
+                ("contentmd5", md5sum) \
+                ))
+            db[url + "|" + item["link"]] = data
+
+    if headers:
+        data = []
+        for header in headers:
+            if header[0] in ["content-md5", "etag", "last-modified", "content-length"]:
+                data.append((header[0], header[1]))
+        if len(data) > 0:
+            data = urllib.urlencode(data)
+            feeddb[url] = data
 
     db.close()
+    feeddb.close()
 
 # first off, parse the command line arguments
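
Reviewer note: after a successful pass, the feeds db caches the interesting response headers (content-md5, etag, last-modified, content-length) as another urlencoded record, which is what the HEAD comparison at the top of parse_and_deliver reads back on the next run. A quick way to inspect that state (the statedir path here is hypothetical):

import dbm
import cgi

feeddb = dbm.open("/path/to/statedir/feeds", "r")
for url in feeddb.keys():
    cached = cgi.parse_qs(feeddb[url])
    print url, cached.get("etag", ["-"])[0], \
        cached.get("last-modified", ["-"])[0]
feeddb.close()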