X-Git-Url: https://git.sommitrealweird.co.uk/rss2maildir.git/blobdiff_plain/d2619aa8bc21a7af97d2ae8b6d198a3e8c50aa33..1b8977fade98b0903f4f68cdfba1382a2f4a377a:/rss2maildir.py

diff --git a/rss2maildir.py b/rss2maildir.py
index 2d26217..6dad334 100755
--- a/rss2maildir.py
+++ b/rss2maildir.py
@@ -17,10 +17,10 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import mailbox
 import sys
 import os
 import stat
+import httplib
 import urllib
 
 import feedparser
@@ -53,12 +53,12 @@ entities = {
     "pound": "£",
     "copy": "©",
     "apos": "'",
-    "quote": "\"",
+    "quot": "\"",
     "nbsp": " ",
     }
 
 class HTML2Text(HTMLParser):
-    
+
     def __init__(self):
         self.inheadingone = False
         self.inheadingtwo = False
@@ -71,6 +71,9 @@ class HTML2Text(HTMLParser):
         self.headingtext = u''
         self.blockquote = u''
         self.inpre = False
+        self.inul = False
+        self.initem = False
+        self.item = u''
         HTMLParser.__init__(self)
 
     def handle_starttag(self, tag, attrs):
@@ -86,22 +89,16 @@ class HTML2Text(HTMLParser):
         elif tag.lower() == "a":
             self.inlink = True
         elif tag.lower() == "br":
-            if self.inparagraph:
-                self.text = self.text + "\n".join(textwrap.wrap(self.currentparagraph, 70)).encode('utf-8') + "\n"
-                self.currentparagraph = ""
-            elif self.inblockquote:
-                self.text = self.text + "\n> " + "\n> ".join([a.strip() for a in textwrap.wrap(self.blockquote, 68)]).encode("utf-8") + "\n"
-                self.blockquote = u''
-            else:
-                self.text = self.text + "\n"
+            self.handle_br()
         elif tag.lower() == "blockquote":
             self.inblockquote = True
-            self.text = self.text + "\n"
+            self.text = self.text + u'\n'
         elif tag.lower() == "p":
             if self.text != "":
-                self.text = self.text + "\n\n"
+                self.text = self.text + u'\n\n'
             if self.inparagraph:
-                self.text = self.text + "\n".join(textwrap.wrap(self.currentparagraph, 70)).encode("utf-8")
+                self.text = self.text \
+                    + u'\n'.join(textwrap.wrap(self.currentparagraph, 70))
                 self.currentparagraph = u''
             self.inparagraph = True
         elif tag.lower() == "pre":
@@ -109,50 +106,126 @@ class HTML2Text(HTMLParser):
             self.inpre = True
             self.inparagraph = False
             self.inblockquote = False
+        elif tag.lower() == "ul":
+            self.item = u''
+            self.inul = True
+            self.text = self.text + "\n"
+        elif tag.lower() == "li" and self.inul:
+            if not self.initem:
+                self.initem = True
+                self.item = u''
+            else:
+                self.text = self.text \
+                    + u' * ' \
+                    + u'\n   '.join([a.strip() for a in \
+                        textwrap.wrap(self.item, 67)]) \
+                    + u'\n'
+                self.item = u''
 
     def handle_startendtag(self, tag, attrs):
         if tag.lower() == "br":
+            self.handle_br()
+
+    def handle_br(self):
             if self.inparagraph:
-                self.text = self.text + "\n".join(textwrap.wrap(self.currentparagraph, 70)).encode("utf-8") + "\n"
+                self.text = self.text \
+                    + u'\n'.join( \
+                        [a \
+                            for a in textwrap.wrap( \
+                                self.currentparagraph, 70) \
+                            ] \
+                        ) \
+                    + u'\n'
                 self.currentparagraph = u''
             elif self.inblockquote:
-                self.text = self.text + "\n> " + "\n> ".join([a.strip() for a in textwrap.wrap(self.blockquote, 68)]).encode("utf-8") + "\n"
-                self.blockquote = ""
+                self.text = self.text \
+                    + u'\n> ' \
+                    + u'\n> '.join( \
+                        [a \
+                            for a in textwrap.wrap( \
+                                self.blockquote.encode("utf-8") \
+                                , 68) \
+                            ] \
+                        ) \
+                    + u'\n'
+                self.blockquote = u''
             else:
                 self.text = self.text + "\n"
 
     def handle_endtag(self, tag):
         if tag.lower() == "h1":
             self.inheadingone = False
-            self.text = self.text + "\n\n" + self.headingtext + "\n" + "=" * len(self.headingtext.strip())
+            self.text = self.text \
+                + u'\n\n' \
+                + self.headingtext.encode("utf-8") \
+                + u'\n' \
+                + u'=' * len(self.headingtext.encode("utf-8").strip())
             self.headingtext = u''
         elif tag.lower() == "h2":
             self.inheadingtwo = False
-            self.text = self.text + "\n\n" + self.headingtext + "\n" + "-" * len(self.headingtext.strip())
+            self.text = self.text \
+                + u'\n\n' \
+                + self.headingtext.encode("utf-8") \
+                + u'\n' \
+                + u'-' * len(self.headingtext.encode("utf-8").strip())
             self.headingtext = u''
         elif tag.lower() in ["h3", "h4", "h5", "h6"]:
             self.inotherheading = False
-            self.text = self.text + "\n\n" + self.headingtext + "\n" + "~" * len(self.headingtext.strip())
+            self.text = self.text \
+                + u'\n\n' \
+                + self.headingtext.encode("utf-8") \
+                + u'\n' \
+                + u'~' * len(self.headingtext.encode("utf-8").strip())
             self.headingtext = u''
         elif tag.lower() == "p":
-            self.text = self.text + "\n".join(textwrap.wrap(self.currentparagraph, 70))
+            self.text = self.text \
+                + u'\n'.join(textwrap.wrap( \
+                    self.currentparagraph, 70) \
+                    )
             self.inparagraph = False
+            self.currentparagraph = u''
         elif tag.lower() == "blockquote":
-            self.text = self.text + "\n> " + "\n> ".join([a.strip() for a in textwrap.wrap(self.blockquote, 68)]).encode("utf-8") + "\n"
+            self.text = self.text \
+                + u'\n> ' \
+                + u'\n> '.join( \
+                    [a.strip() \
+                        for a in textwrap.wrap( \
+                            self.blockquote, 68)] \
+                    ) \
+                + u'\n'
             self.inblockquote = False
             self.blockquote = u''
         elif tag.lower() == "pre":
             self.inpre = False
+        elif tag.lower() == "li":
+            self.initem = False
+            if self.item != "":
+                self.text = self.text \
+                    + u' * ' \
+                    + u'\n   '.join( \
+                        [a.strip() for a in textwrap.wrap(self.item, 67)]) \
+                    + u'\n'
+            self.item = u''
+        elif tag.lower() == "ul":
+            self.inul = False
 
     def handle_data(self, data):
         if self.inheadingone or self.inheadingtwo or self.inotherheading:
-            self.headingtext = self.headingtext + unicode(data, "utf-8").strip() + u' '
+            self.headingtext = self.headingtext \
+                + unicode(data, "utf-8").strip() \
+                + u' '
         elif self.inblockquote:
-            self.blockquote = self.blockquote + unicode(data, "utf-8").strip() + u' '
+            self.blockquote = self.blockquote \
+                + unicode(data, "utf-8").strip() \
+                + u' '
        elif self.inparagraph:
-            self.currentparagraph = self.currentparagraph + unicode(data, "utf-8").strip() + u' '
+            self.currentparagraph = self.currentparagraph \
+                + unicode(data, "utf-8").strip() \
+                + u' '
+        elif self.inul and self.initem:
+            self.item = self.item + unicode(data, "utf-8")
         elif self.inpre:
-            self.text = self.text + data.encode("utf-8")
+            self.text = self.text + unicode(data, "utf-8")
         else:
             self.text = self.text + unicode(data, "utf-8").strip() + u' '
@@ -166,11 +239,12 @@ class HTML2Text(HTMLParser):
             entity = "&" + name + ";"
 
         if self.inparagraph:
-            self.currentparagraph = self.currentparagraph + entity
+            self.currentparagraph = self.currentparagraph \
+                + unicode(entity, "utf-8")
         elif self.inblockquote:
-            self.blockquote = self.blockquote + entity
+            self.blockquote = self.blockquote + unicode(entity, "utf-8")
         else:
-            self.text = self.text + entity
+            self.text = self.text + unicode(entity, "utf-8")
 
     def gettext(self):
         data = self.text
@@ -178,9 +252,79 @@ class HTML2Text(HTMLParser):
             data = data + "\n".join(textwrap.wrap(self.currentparagraph, 70))
         return data
 
+def open_url(method, url):
+    redirectcount = 0
+    while redirectcount < 3:
+        (type, rest) = urllib.splittype(url)
+        (host, path) = urllib.splithost(rest)
+        (host, port) = urllib.splitport(host)
+        if port == None:
+            port = 80
+        try:
+            conn = httplib.HTTPConnection("%s:%s" %(host, port))
+            conn.request(method, path)
+            response = conn.getresponse()
+            if response.status in [301, 302, 303, 307]:
+                headers = response.getheaders()
+                for header in headers:
+                    if header[0] == "location":
+                        url = header[1]
+            elif response.status == 200:
+                return response
+        except:
+            pass
+        redirectcount = redirectcount + 1
+    return None
+
 def parse_and_deliver(maildir, url, statedir):
-    md = mailbox.Maildir(maildir)
-    fp = feedparser.parse(url)
+    feedhandle = None
+    headers = None
+    # first check if we know about this feed already
+    feeddb = dbm.open(os.path.join(statedir, "feeds"), "c")
+    if feeddb.has_key(url):
+        data = feeddb[url]
+        data = cgi.parse_qs(data)
+        response = open_url("HEAD", url)
+        headers = None
+        if response:
+            headers = response.getheaders()
+        ischanged = False
+        try:
+            for header in headers:
+                if header[0] == "content-length":
+                    if header[1] != data["content-length"][0]:
+                        ischanged = True
+                elif header[0] == "etag":
+                    if header[1] != data["etag"][0]:
+                        ischanged = True
+                elif header[0] == "last-modified":
+                    if header[1] != data["last-modified"][0]:
+                        ischanged = True
+                elif header[0] == "content-md5":
+                    if header[1] != data["content-md5"][0]:
+                        ischanged = True
+        except:
+            ischanged = True
+        if ischanged:
+            response = open_url("GET", url)
+            if response != None:
+                headers = response.getheaders()
+                feedhandle = response
+            else:
+                sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+                return
+        else:
+            return # don't need to do anything, nothings changed.
+    else:
+        response = open_url("GET", url)
+        if response != None:
+            headers = response.getheaders()
+            feedhandle = response
+        else:
+            sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+            return
+
+    fp = feedparser.parse(feedhandle)
     db = dbm.open(os.path.join(statedir, "seen"), "c")
     for item in fp["items"]:
         # have we seen it before?
@@ -193,9 +337,22 @@ def parse_and_deliver(maildir, url, statedir):
 
         md5sum = md5.md5(content.encode("utf-8")).hexdigest()
 
+        prevmessageid = None
+
+        # check if there's a guid too - if that exists and we match the md5,
+        # return
+        if item.has_key("guid"):
+            if db.has_key(url + "|" + item["guid"]):
+                data = db[url + "|" + item["guid"]]
+                data = cgi.parse_qs(data)
+                if data["contentmd5"][0] == md5sum:
+                    continue
+
         if db.has_key(url + "|" + item["link"]):
             data = db[url + "|" + item["link"]]
             data = cgi.parse_qs(data)
+            if data.has_key("message-id"):
+                prevmessageid = data["message-id"][0]
             if data["contentmd5"][0] == md5sum:
                 continue
 
@@ -206,12 +363,27 @@ def parse_and_deliver(maildir, url, statedir):
         # create a basic email message
         msg = MIMEMultipart("alternative")
-        messageid = "<" + datetime.datetime.now().strftime("%Y%m%d%H%M") + "." + "".join([random.choice(string.ascii_letters + string.digits) for a in range(0,6)]) + "@" + socket.gethostname() + ">"
+        messageid = "<" \
+            + datetime.datetime.now().strftime("%Y%m%d%H%M") \
+            + "." \
+            + "".join( \
+                [random.choice( \
+                    string.ascii_letters + string.digits \
+                    ) for a in range(0,6) \
+                ]) + "@" + socket.gethostname() + ">"
         msg.add_header("Message-ID", messageid)
         msg.set_unixfrom("\"%s\" " %(url))
         msg.add_header("From", "\"%s\" " %(author))
         msg.add_header("To", "\"%s\" " %(url))
-        createddate = datetime.datetime(*item["updated_parsed"][0:6]).strftime("%a, %e %b %Y %T -0000")
+        if prevmessageid:
+            msg.add_header("References", prevmessageid)
+        createddate = datetime.datetime.now() \
+            .strftime("%a, %e %b %Y %T -0000")
+        try:
+            createddate = datetime.datetime(*item["updated_parsed"][0:6]) \
+                .strftime("%a, %e %b %Y %T -0000")
+        except:
+            pass
         msg.add_header("Date", createddate)
         msg.add_header("Subject", item["title"])
         msg.set_default_type("text/plain")
 
@@ -226,7 +398,14 @@ def parse_and_deliver(maildir, url, statedir):
 
         # start by working out the filename we should be writting to, we do
         # this following the normal maildir style rules
-        fname = str(os.getpid()) + "." + socket.gethostname() + "." + "".join([random.choice(string.ascii_letters + string.digits) for a in range(0,10)]) + "." + datetime.datetime.now().strftime('%s')
+        fname = str(os.getpid()) \
+            + "." + socket.gethostname() \
+            + "." + "".join( \
+                [random.choice( \
+                    string.ascii_letters + string.digits \
+                    ) for a in range(0,10) \
+                ]) + "." \
+            + datetime.datetime.now().strftime('%s')
         fn = os.path.join(maildir, "tmp", fname)
         fh = open(fn, "w")
         fh.write(msg.as_string())
@@ -237,10 +416,45 @@ def parse_and_deliver(maildir, url, statedir):
         os.unlink(fn)
 
         # now add to the database about the item
-        data = urllib.urlencode((("message-id", messageid), ("created", createddate), ("contentmd5", md5sum)))
-        db[url + "|" + item["link"]] = data
+        if prevmessageid:
+            messageid = prevmessageid + " " + messageid
+        if item.has_key("guid") and item["guid"] != item["link"]:
+            data = urllib.urlencode(( \
+                ("message-id", messageid), \
+                ("created", createddate), \
+                ("contentmd5", md5sum) \
+                ))
+            db[url + "|" + item["guid"]] = data
+            try:
+                data = db[url + "|" + item["link"]]
+                data = cgi.parse_qs(data)
+                newdata = urllib.urlencode(( \
+                    ("message-id", messageid), \
+                    ("created", data["created"][0]), \
+                    ("contentmd5", data["contentmd5"][0]) \
+                    ))
+                db[url + "|" + item["link"]] = newdata
+            except:
+                db[url + "|" + item["link"]] = data
+        else:
+            data = urllib.urlencode(( \
+                ("message-id", messageid), \
+                ("created", createddate), \
+                ("contentmd5", md5sum) \
+                ))
+            db[url + "|" + item["link"]] = data
+
+    if headers:
+        data = []
+        for header in headers:
+            if header[0] in ["content-md5", "etag", "last-modified", "content-length"]:
+                data.append((header[0], header[1]))
+        if len(data) > 0:
+            data = urllib.urlencode(data)
+            feeddb[url] = data
 
     db.close()
+    feeddb.close()
 
 # first off, parse the command line arguments
@@ -267,7 +481,8 @@ if options.conf != None:
         configfile = options.conf
     except:
         # should exit here as the specified file doesn't exist
-        sys.stderr.write("Config file %s does not exist. Exiting.\n" %(options.conf,))
+        sys.stderr.write( \
+            "Config file %s does not exist. Exiting.\n" %(options.conf,))
         sys.exit(2)
 else:
     # check through the default locations
@@ -296,7 +511,8 @@ if options.statedir != None:
     try:
         mode = os.stat(state_dir)[stat.ST_MODE]
         if not stat.S_ISDIR(mode):
-            sys.stderr.write("State directory (%s) is not a directory\n" %(state_dir))
+            sys.stderr.write( \
+                "State directory (%s) is not a directory\n" %(state_dir))
            sys.exit(1)
    except:
        # try to make the directory
@@ -310,7 +526,8 @@ elif scp.has_option("general", "state_dir"):
    try:
        mode = os.stat(state_dir)[stat.ST_MODE]
        if not stat.S_ISDIR(mode):
-            sys.stderr.write("State directory (%s) is not a directory\n" %(state_dir))
+            sys.stderr.write( \
+                "State directory (%s) is not a directory\n" %(state_dir))
            sys.exit(1)
    except:
        # try to create it
@@ -318,19 +535,22 @@ elif scp.has_option("general", "state_dir"):
            os.mkdir(new_state_dir)
            state_dir = new_state_dir
        except:
-            sys.stderr.write("Couldn't create state directory %s\n" %(new_state_dir))
+            sys.stderr.write( \
+                "Couldn't create state directory %s\n" %(new_state_dir))
            sys.exit(1)
 else:
    try:
        mode = os.stat(state_dir)[stat.ST_MODE]
        if not stat.S_ISDIR(mode):
-            sys.stderr.write("State directory %s is not a directory\n" %(state_dir))
+            sys.stderr.write( \
+                "State directory %s is not a directory\n" %(state_dir))
            sys.exit(1)
    except:
        try:
            os.mkdir(state_dir)
        except:
-            sys.stderr.write("State directory %s could not be created\n" %(state_dir))
+            sys.stderr.write( \
+                "State directory %s could not be created\n" %(state_dir))
            sys.exit(1)
 
 if scp.has_option("general", "maildir_root"):
@@ -339,7 +559,9 @@ if scp.has_option("general", "maildir_root"):
    try:
        mode = os.stat(maildir_root)[stat.ST_MODE]
        if not stat.S_ISDIR(mode):
-            sys.stderr.write("Maildir Root %s is not a directory\n" %(maildir_root))
+            sys.stderr.write( \
+                "Maildir Root %s is not a directory\n" \
+                %(maildir_root))
            sys.exit(1)
    except:
        try:
@@ -400,7 +622,9 @@ for section in feeds:
            os.mkdir(os.path.join(maildir, "cur"))
            os.mkdir(os.path.join(maildir, "tmp"))
        except:
-            sys.stderr.write("Couldn't create required maildir directories for %s\n" %(section,))
+            sys.stderr.write( \
+                "Couldn't create required maildir directories for %s\n" \
+                %(section,))
            sys.exit(1)
 
    # right - we've got the directories, we've got the section, we know the