X-Git-Url: https://git.sommitrealweird.co.uk/rss2maildir.git/blobdiff_plain/e3114c366ffcca31fedc33388dd887043e7e0af7..f8d24fa18a94f935fea43089ecc3eb68f916c2f8:/rss2maildir.py?ds=inline

diff --git a/rss2maildir.py b/rss2maildir.py
index 268d192..9762353 100755
--- a/rss2maildir.py
+++ b/rss2maildir.py
@@ -20,6 +20,7 @@
 import sys
 import os
 import stat
+import httplib
 import urllib
 
 import feedparser
@@ -52,12 +53,12 @@ entities = {
     "pound": "£",
     "copy": "©",
     "apos": "'",
-    "quote": "\"",
+    "quot": "\"",
     "nbsp": " ",
 }
 
 class HTML2Text(HTMLParser):
-    
+
     def __init__(self):
         self.inheadingone = False
         self.inheadingtwo = False
@@ -88,23 +89,7 @@ class HTML2Text(HTMLParser):
         elif tag.lower() == "a":
             self.inlink = True
         elif tag.lower() == "br":
-            if self.inparagraph:
-                self.text = self.text \
-                    + u'\n'.join( \
-                        textwrap.wrap(self.currentparagraph, 70)) \
-                    + u'\n'
-                self.currentparagraph = ""
-            elif self.inblockquote:
-                self.text = self.text \
-                    + u'\n> ' \
-                    + u'\n> '.join( \
-                        [a.strip() \
-                            for a in textwrap.wrap(self.blockquote, 68) \
-                        ]) \
-                    + u'\n'
-                self.blockquote = u''
-            else:
-                self.text = self.text + u'\n'
+            self.handle_br()
         elif tag.lower() == "blockquote":
             self.inblockquote = True
             self.text = self.text + u'\n'
@@ -139,6 +124,9 @@ def handle_startendtag(self, tag, attrs):
         if tag.lower() == "br":
+            self.handle_br()
+
+    def handle_br(self):
             if self.inparagraph:
                 self.text = self.text \
                     + u'\n'.join( \
                         textwrap.wrap(self.currentparagraph, 70)) \
                     + u'\n'
@@ -264,8 +252,79 @@ class HTML2Text(HTMLParser):
             data = data + "\n".join(textwrap.wrap(self.currentparagraph, 70))
         return data
 
+def open_url(method, url):
+    redirectcount = 0
+    while redirectcount < 3:
+        (type, rest) = urllib.splittype(url)
+        (host, path) = urllib.splithost(rest)
+        (host, port) = urllib.splitport(host)
+        if port == None:
+            port = 80
+        try:
+            conn = httplib.HTTPConnection("%s:%s" %(host, port))
+            conn.request(method, path)
+            response = conn.getresponse()
+            if response.status in [301, 302, 303, 307]:
+                headers = response.getheaders()
+                for header in headers:
+                    if header[0] == "location":
+                        url = header[1]
+            elif response.status == 200:
+                return response
+        except:
+            pass
+        redirectcount = redirectcount + 1
+    return None
+
 def parse_and_deliver(maildir, url, statedir):
-    fp = feedparser.parse(url)
+    feedhandle = None
+    headers = None
+    # first check if we know about this feed already
+    feeddb = dbm.open(os.path.join(statedir, "feeds"), "c")
+    if feeddb.has_key(url):
+        data = feeddb[url]
+        data = cgi.parse_qs(data)
+        response = open_url("HEAD", url)
+        headers = None
+        if response:
+            headers = response.getheaders()
+        ischanged = False
+        try:
+            for header in headers:
+                if header[0] == "content-length":
+                    if header[1] != data["content-length"][0]:
+                        ischanged = True
+                elif header[0] == "etag":
+                    if header[1] != data["etag"][0]:
+                        ischanged = True
+                elif header[0] == "last-modified":
+                    if header[1] != data["last-modified"][0]:
+                        ischanged = True
+                elif header[0] == "content-md5":
+                    if header[1] != data["content-md5"][0]:
+                        ischanged = True
+        except:
+            ischanged = True
+        if ischanged:
+            response = open_url("GET", url)
+            if response != None:
+                headers = response.getheaders()
+                feedhandle = response
+            else:
+                sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+                return
+        else:
+            return # don't need to do anything, nothing's changed.
+    else:
+        response = open_url("GET", url)
+        if response != None:
+            headers = response.getheaders()
+            feedhandle = response
+        else:
+            sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+            return
+
+    fp = feedparser.parse(feedhandle)
     db = dbm.open(os.path.join(statedir, "seen"), "c")
     for item in fp["items"]:
         # have we seen it before?
@@ -278,9 +337,22 @@ def parse_and_deliver(maildir, url, statedir):
 
         md5sum = md5.md5(content.encode("utf-8")).hexdigest()
 
+        prevmessageid = None
+
+        # check if there's a guid too - if that exists and we match the md5,
+        # return
+        if item.has_key("guid"):
+            if db.has_key(url + "|" + item["guid"]):
+                data = db[url + "|" + item["guid"]]
+                data = cgi.parse_qs(data)
+                if data["contentmd5"][0] == md5sum:
+                    continue
+
         if db.has_key(url + "|" + item["link"]):
             data = db[url + "|" + item["link"]]
             data = cgi.parse_qs(data)
+            if data.has_key("message-id"):
+                prevmessageid = data["message-id"][0]
             if data["contentmd5"][0] == md5sum:
                 continue
@@ -303,16 +375,31 @@ def parse_and_deliver(maildir, url, statedir):
         msg.set_unixfrom("\"%s\" " %(url))
         msg.add_header("From", "\"%s\" " %(author))
         msg.add_header("To", "\"%s\" " %(url))
-        createddate = datetime.datetime(*item["updated_parsed"][0:6]) \
+        if prevmessageid:
+            msg.add_header("References", prevmessageid)
+        createddate = datetime.datetime.now() \
             .strftime("%a, %e %b %Y %T -0000")
+        try:
+            createddate = datetime.datetime(*item["updated_parsed"][0:6]) \
+                .strftime("%a, %e %b %Y %T -0000")
+        except:
+            pass
         msg.add_header("Date", createddate)
         msg.add_header("Subject", item["title"])
         msg.set_default_type("text/plain")
 
-        htmlpart = MIMEText(content.encode("utf-8"), "html", "utf-8")
+        htmlcontent = content.encode("utf-8")
+        htmlcontent = "%s\n\n<p>Item URL: <a href='%s'>%s</a></p>" %( \
+            content, \
+            item["link"], \
+            item["link"] )
+        htmlpart = MIMEText(htmlcontent.encode("utf-8"), "html", "utf-8")
         textparser = HTML2Text()
         textparser.feed(content.encode("utf-8"))
         textcontent = textparser.gettext()
+        textcontent = "%s\n\nItem URL: %s" %( \
+            textcontent, \
+            item["link"] )
         textpart = MIMEText(textcontent.encode("utf-8"), "plain", "utf-8")
         msg.attach(textpart)
         msg.attach(htmlpart)
@@ -337,14 +424,45 @@ def parse_and_deliver(maildir, url, statedir):
             os.unlink(fn)
 
         # now add to the database about the item
-        data = urllib.urlencode((
-            ("message-id", messageid), \
-            ("created", createddate), \
-            ("contentmd5", md5sum) \
-            ))
-        db[url + "|" + item["link"]] = data
+        if prevmessageid:
+            messageid = prevmessageid + " " + messageid
+        if item.has_key("guid") and item["guid"] != item["link"]:
+            data = urllib.urlencode(( \
+                ("message-id", messageid), \
+                ("created", createddate), \
+                ("contentmd5", md5sum) \
+                ))
+            db[url + "|" + item["guid"]] = data
+            try:
+                data = db[url + "|" + item["link"]]
+                data = cgi.parse_qs(data)
+                newdata = urllib.urlencode(( \
+                    ("message-id", messageid), \
+                    ("created", data["created"][0]), \
+                    ("contentmd5", data["contentmd5"][0]) \
+                    ))
+                db[url + "|" + item["link"]] = newdata
+            except:
+                db[url + "|" + item["link"]] = data
+        else:
+            data = urllib.urlencode(( \
+                ("message-id", messageid), \
+                ("created", createddate), \
+                ("contentmd5", md5sum) \
+                ))
+            db[url + "|" + item["link"]] = data
+
+    if headers:
+        data = []
+        for header in headers:
+            if header[0] in ["content-md5", "etag", "last-modified", "content-length"]:
+                data.append((header[0], header[1]))
+        if len(data) > 0:
+            data = urllib.urlencode(data)
+            feeddb[url] = data
 
     db.close()
+    feeddb.close()
 
 # first off, parse the command line arguments