4 # rss2maildir.py - RSS feeds to Maildir 1 email per item
5 # Copyright (C) 2007 Brett Parker <iDunno@sommitrealweird.co.uk>
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
28 from email.MIMEMultipart import MIMEMultipart
29 from email.MIMEText import MIMEText
38 from optparse import OptionParser
39 from ConfigParser import SafeConfigParser
41 from base64 import b64encode
49 from HTMLParser import HTMLParser
# HTML2Text: an HTMLParser subclass that renders an HTML fragment into
# plain text (reStructuredText-flavoured: heading underlines, `ref`__
# links, |alt| image substitutions) for the text/plain part of each mail.
# NOTE(review): this is a numbered excerpt — the embedded original line
# numbers jump (e.g. 215 -> 219, 253 -> 255), so statements are missing
# throughout. Attributes the methods read (self.opentags, self.text,
# self.curdata, self.urls, self.images, self.listcount, self.indentlevel,
# self.blockleveltags, self.liststarttags, self.cancontainflow,
# HTML2Text.entities) are presumably initialised on lines not shown here
# — confirm against the full file before changing anything.
51 class HTML2Text(HTMLParser):
# Construct the parser; textwidth is the wrap column for the rendered text.
209 def __init__(self,textwidth=70):
212 self.textwidth = textwidth
# ignorenodata suppresses a redundant blank line after an explicit <br>.
215 self.ignorenodata = False
219 HTMLParser.__init__(self)
# Opening tag: flush pending text, adjust list/indent bookkeeping, and
# push the tag onto the open-tag stack. Lines are missing between the
# branches shown, so several branch bodies are elsewhere in the file.
221 def handle_starttag(self, tag, attrs):
222 tag_name = tag.lower()
223 if tag_name in self.blockleveltags:
224 # handle starting a new block - unless we're in a block element
225 # that can contain other blocks, we'll assume that we want to close
227 if len(self.opentags) > 1 and self.opentags[-1] == u'li':
228 self.handle_curdata()
# An <ol> starts a fresh item counter; listlevel tracks nesting depth.
230 if tag_name == u'ol':
231 self.handle_curdata()
232 self.listcount.append(1)
233 self.listlevel = len(self.listcount) - 1
235 if tag_name == u'dl':
236 self.indentlevel = self.indentlevel + 4
# Nested lists: look at the two most recent enclosing tags to decide how
# much extra indentation this new list gets (4 for dl/ol, 3 for ul).
238 if tag_name in self.liststarttags:
239 smallist = self.opentags[-3:-1]
241 for prev_listtag in smallist:
242 if prev_listtag in [u'dl', u'ol']:
243 self.indentlevel = self.indentlevel + 4
245 elif prev_listtag == u'ul':
246 self.indentlevel = self.indentlevel + 3
249 if len(self.opentags) > 0:
250 self.handle_curdata()
251 if tag_name not in self.cancontainflow:
253 self.opentags.append(tag_name)
255 if tag_name == "span":
259 listcount = self.listcount[-1]
# dt/dd adjacency: flush the previous definition-list part first.
263 if tag_name == u'dd' and len(self.opentags) > 1 \
264 and self.opentags[-1] == u'dt':
265 self.handle_curdata()
267 elif tag_name == u'dt' and len(self.opentags) > 1 \
268 and self.opentags[-1] == u'dd':
269 self.handle_curdata()
# <a href=...>: remember the target URL and open an rST `...` reference.
# NOTE(review): `attr` here implies an enclosing `for attr in attrs:`
# loop that is missing from this excerpt.
271 elif tag_name == u'a':
273 if attr[0].lower() == u'href':
274 self.urls.append(attr[1].decode('utf-8'))
275 self.curdata = self.curdata + u'`'
276 self.opentags.append(tag_name)
278 elif tag_name == u'img':
279 self.handle_image(attrs)
281 elif tag_name == u'br':
285 # we don't know the tag, so lets avoid handling it!
# Self-closing tags (<br/>, <img/>): same handling as start tags.
288 def handle_startendtag(self, tag, attrs):
289 if tag.lower() == u'br':
291 elif tag.lower() == u'img':
292 self.handle_image(attrs)
296 self.handle_curdata()
297 self.opentags.append(u'br')
298 self.handle_curdata()
# Record an image as an rST substitution (|alt| image:: url), de-duping
# alt text when the same alt maps to a different URL. The for-loop over
# attrs that sets `alt`/`url` is partially missing from this excerpt.
301 def handle_image(self, attrs):
306 alt = attr[1].decode('utf-8')
307 elif attr[0] == 'src':
308 url = attr[1].decode('utf-8')
311 if self.images.has_key(alt):
312 if self.images[alt]["url"] == url:
313 self.curdata = self.curdata \
316 while self.images.has_key(alt):
318 self.images[alt] = {"url": url}
319 self.curdata = self.curdata \
322 self.images[alt] = {"url": url}
323 self.curdata = self.curdata \
# No alt text: key the substitution on the URL itself.
326 if self.images.has_key(url):
327 self.curdata = self.curdata \
330 self.images[url] = {}
331 self.images[url]["url"] =url
332 self.curdata = self.curdata \
# Core renderer: take the buffered character data (self.curdata) and
# append it to self.text, formatted according to the innermost open tag
# (headings get underlines, p/div get wrapped paragraphs, pre is verbatim,
# blockquote/li/dt/dd get indented-and-wrapped treatment).
335 def handle_curdata(self):
337 if len(self.opentags) == 0:
340 tag_thats_done = self.opentags[-1]
342 if len(self.curdata) == 0:
# <br> forces a single newline; ignorenodata stops the following block
# element from adding another blank line on top of it.
345 if tag_thats_done == u'br':
346 if len(self.text) == 0 or self.text[-1] != '\n':
347 self.text = self.text + '\n'
348 self.ignorenodata = True
351 if len(self.curdata.strip()) == 0:
# Block-level elements are separated by a blank line (one newline for
# list/definition items, two otherwise).
354 if tag_thats_done in self.blockleveltags:
355 newlinerequired = self.text != u''
356 if self.ignorenodata:
357 newlinerequired = False
358 self.ignorenodata = False
360 if tag_thats_done in [u'dt', u'dd', u'li'] \
361 and len(self.text) > 1 \
362 and self.text[-1] != u'\n':
363 self.text = self.text + u'\n'
364 elif len(self.text) > 2 \
365 and self.text[-1] != u'\n' \
366 and self.text[-2] != u'\n':
367 self.text = self.text + u'\n\n'
# Headings: collapse whitespace, wrap, then underline with a character
# chosen by heading level (the underlinechar assignments for h1/h2/other
# are on lines missing from this excerpt).
369 if tag_thats_done in ["h1", "h2", "h3", "h4", "h5", "h6"]:
372 headingtext = " ".join(self.curdata.split())
373 seperator = u'\n' + u' '*self.indentlevel
374 headingtext = seperator.join( \
377 self.textwidth - self.indentlevel \
381 if tag_thats_done == u'h2':
383 elif tag_thats_done != u'h1':
# Multi-line headings get a full-width underline; single-line headings
# get an underline the length of the heading text.
386 if u'\n' in headingtext:
387 underline = u' ' * self.indentlevel \
388 + underlinechar * (self.textwidth - self.indentlevel)
390 underline = u' ' * self.indentlevel \
391 + underlinechar * len(headingtext)
392 self.text = self.text \
393 + headingtext + u'\n' \
# Paragraphs: whitespace-normalise then wrap at the remaining width.
395 elif tag_thats_done in [u'p', u'div']:
396 paragraph = unicode( \
397 " ".join(self.curdata.strip().encode("utf-8").split()), \
399 seperator = u'\n' + u' ' * self.indentlevel
400 self.text = self.text \
401 + u' ' * self.indentlevel \
404 paragraph, self.textwidth - self.indentlevel))
# <pre>: emit verbatim, no wrapping or whitespace collapsing.
405 elif tag_thats_done == "pre":
406 self.text = self.text + unicode( \
407 self.curdata.encode("utf-8"), "utf-8")
# Blockquote: wrapped with a two-column quote indent.
408 elif tag_thats_done == u'blockquote':
410 " ".join(self.curdata.encode("utf-8").strip().split()), \
412 seperator = u'\n' + u' ' * self.indentlevel + u' '
413 if len(self.text) > 0 and self.text[-1] != u'\n':
414 self.text = self.text + u'\n'
415 self.text = self.text \
420 self.textwidth - self.indentlevel - 2 \
# List items: pick a numbered marker inside <ol> (listcount) or a bullet
# otherwise; the ul-marker assignment is on lines missing here.
424 elif tag_thats_done == "li":
425 item = unicode(self.curdata.encode("utf-8").strip(), "utf-8")
426 if len(self.text) > 0 and self.text[-1] != u'\n':
427 self.text = self.text + u'\n'
428 # work out if we're in an ol rather than a ul
429 latesttags = self.opentags[-4:]
432 for thing in latesttags:
446 listmarker = u' %2d. ' %(self.listcount[-1])
447 self.listcount[-1] = self.listcount[-1] + 1
450 + u' ' * self.indentlevel \
452 self.text = self.text \
453 + u' ' * self.indentlevel \
458 self.textwidth - self.indentlevel - listindent \
# Definition term: rendered as "term::" at reduced indent.
462 elif tag_thats_done == u'dt':
463 definition = unicode(" ".join( \
464 self.curdata.encode("utf-8").strip().split()), \
466 if len(self.text) > 0 and self.text[-1] != u'\n':
467 self.text = self.text + u'\n\n'
468 elif len(self.text) > 1 and self.text[-2] != u'\n':
469 self.text = self.text + u'\n'
470 definition = u' ' * (self.indentlevel - 4) + definition + "::"
471 indentstring = u'\n' + u' ' * (self.indentlevel - 3)
472 self.text = self.text \
474 textwrap.wrap(definition, \
475 self.textwidth - self.indentlevel - 4))
# Definition body: wrapped at the full definition-list indent.
477 elif tag_thats_done == u'dd':
478 definition = unicode(" ".join( \
479 self.curdata.encode("utf-8").strip().split()),
481 if len(definition) > 0:
482 if len(self.text) > 0 and self.text[-1] != u'\n':
483 self.text = self.text + u'\n'
484 indentstring = u'\n' + u' ' * self.indentlevel
485 self.text = self.text \
487 + indentstring.join( \
490 self.textwidth - self.indentlevel \
# Close the rST anonymous-link reference opened in handle_starttag.
494 elif tag_thats_done == u'a':
495 self.curdata = self.curdata + u'`__'
497 elif tag_thats_done in self.liststarttags:
500 if tag_thats_done in self.blockleveltags:
503 self.ignorenodata = False
# Closing tag: flush buffered data, rewind list indentation, and pop the
# tag (and anything opened after it) off the open-tag stack.
505 def handle_endtag(self, tag):
506 self.ignorenodata = False
511 tagindex = self.opentags.index(tag)
516 if tag in [u'br', u'img']:
520 self.indentlevel = self.indentlevel - 4
# Leaving a list: undo the indent added in handle_starttag and drop the
# current <ol> item counter.
522 if tag in self.liststarttags:
523 if tag in [u'ol', u'dl', u'ul', u'dd']:
524 self.handle_curdata()
525 # find if there was a previous list level
526 smalllist = self.opentags[:-1]
528 for prev_listtag in smalllist:
529 if prev_listtag in [u'ol', u'dl']:
530 self.indentlevel = self.indentlevel - 4
532 elif prev_listtag == u'ul':
533 self.indentlevel = self.indentlevel - 3
537 self.listcount = self.listcount[:-1]
# If the same tag appears more than once on the stack, close the
# innermost (most recent) occurrence.
539 while tagindex < len(self.opentags) \
540 and tag in self.opentags[tagindex+1:]:
542 tagindex = self.opentags.index(tag, tagindex+1)
544 # well, we don't want to do that then
546 if tagindex != len(self.opentags) - 1:
547 # Assuming the data was for the last opened tag first
548 self.handle_curdata()
549 # Now kill the list to be a slice before this tag was opened
550 self.opentags = self.opentags[:tagindex + 1]
552 self.handle_curdata()
553 if self.opentags[-1] == tag:
# Character data: buffer it (decoded to unicode); bare text outside any
# tag is treated as an implicit paragraph.
556 def handle_data(self, data):
557 if len(self.opentags) == 0:
558 self.opentags.append(u'p')
559 self.curdata = self.curdata + data.decode("utf-8")
# Numeric character reference (&#NNN;): append the referenced character.
561 def handle_charref(self, name):
562 entity = unichr(int(name))
563 self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
# Named entity (&name;): resolve via the class-level entities table,
# falling back to emitting the raw "&name;" text.
566 def handle_entityref(self, name):
568 if HTML2Text.entities.has_key(name):
569 entity = HTML2Text.entities[name]
571 entity = "&" + name + ";"
573 self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
# gettext (its `def` line is missing from this excerpt): flush remaining
# data, normalise trailing newlines, then append the collected link
# targets (`__ url` lines) and image substitution definitions.
577 self.handle_curdata()
578 if len(self.text) == 0 or self.text[-1] != u'\n':
579 self.text = self.text + u'\n'
581 if len(self.text) > 0:
582 while len(self.text) > 1 and self.text[-1] == u'\n':
583 self.text = self.text[:-1]
584 self.text = self.text + u'\n'
585 if len(self.urls) > 0:
586 self.text = self.text + u'\n__ ' + u'\n__ '.join(self.urls) + u'\n'
588 if len(self.images.keys()) > 0:
589 self.text = self.text + u'\n.. ' \
591 ["|%s| image:: %s" %(a, self.images[a]["url"]) \
592 for a in self.images.keys()]) + u'\n'
# Fetch `url` with the given HTTP method (HEAD or GET below), following
# up to three 301/302/303/307 redirects; on 200 the httplib response is
# presumably returned (the return statements are on lines missing from
# this excerpt, as is the initialisation of `redirectcount` and the
# default-port handling between lines 601 and 605 — confirm against the
# full file).
596 def open_url(method, url):
598 while redirectcount < 3:
# Split the URL into scheme, host[:port] and path using urllib helpers.
# NOTE(review): `type` shadows the builtin; left as-is (doc-only edit).
599 (type, rest) = urllib.splittype(url)
600 (host, path) = urllib.splithost(rest)
601 (host, port) = urllib.splitport(host)
605 conn = httplib.HTTPConnection("%s:%s" %(host, port))
606 conn.request(method, path)
607 response = conn.getresponse()
# Redirect statuses: pick up the new URL from the location header (the
# assignment after the match is on a missing line) and loop again.
608 if response.status in [301, 302, 303, 307]:
609 headers = response.getheaders()
610 for header in headers:
611 if header[0] == "location":
613 elif response.status == 200:
617 redirectcount = redirectcount + 1
# Fetch one feed URL, detect changed/new items, and deliver each new item
# as a multipart/alternative message into `maildir` (maildir tmp -> new
# rename protocol). State lives in two dbm files under `statedir`:
# "feeds" caches HTTP validator headers per feed, "seen" maps
# url|guid / url|link keys to urlencoded message-id/created/contentmd5
# records so items are not redelivered.
# NOTE(review): numbered excerpt with missing lines — the try/except
# around the fetches, several else-branches, the tmp->new os.rename and
# various `continue`/`return` statements are not shown here.
620 def parse_and_deliver(maildir, url, statedir):
623 # first check if we know about this feed already
624 feeddb = dbm.open(os.path.join(statedir, "feeds"), "c")
# Known feed: re-check its validators with a cheap HEAD request first.
625 if feeddb.has_key(url):
627 data = cgi.parse_qs(data)
628 response = open_url("HEAD", url)
631 headers = response.getheaders()
# Compare each cached validator; a mismatch means the feed has changed
# (the flag set inside these branches is on missing lines).
634 for header in headers:
635 if header[0] == "content-length":
636 if header[1] != data["content-length"][0]:
638 elif header[0] == "etag":
639 if header[1] != data["etag"][0]:
641 elif header[0] == "last-modified":
642 if header[1] != data["last-modified"][0]:
644 elif header[0] == "content-md5":
645 if header[1] != data["content-md5"][0]:
# Feed changed: fetch the body for real.
650 response = open_url("GET", url)
652 headers = response.getheaders()
653 feedhandle = response
655 sys.stderr.write("Failed to fetch feed: %s\n" %(url))
658 return # don't need to do anything, nothings changed.
# Unknown feed: first fetch.
660 response = open_url("GET", url)
662 headers = response.getheaders()
663 feedhandle = response
665 sys.stderr.write("Failed to fetch feed: %s\n" %(url))
668 fp = feedparser.parse(feedhandle)
669 db = dbm.open(os.path.join(statedir, "seen"), "c")
670 for item in fp["items"]:
671 # have we seen it before?
672 # need to work out what the content is first...
# Prefer full content; fall back to the summary.
674 if item.has_key("content"):
675 content = item["content"][0]["value"]
677 content = item["summary"]
# md5 of the content is the change-detection key for an item.
679 md5sum = md5.md5(content.encode("utf-8")).hexdigest()
683 # check if there's a guid too - if that exists and we match the md5,
685 if item.has_key("guid"):
686 if db.has_key(url + "|" + item["guid"]):
687 data = db[url + "|" + item["guid"]]
688 data = cgi.parse_qs(data)
689 if data["contentmd5"][0] == md5sum:
# Seen by link before: keep the old message-id so the update threads as
# a reply (References header below); skip entirely if content unchanged.
692 if db.has_key(url + "|" + item["link"]):
693 data = db[url + "|" + item["link"]]
694 data = cgi.parse_qs(data)
695 if data.has_key("message-id"):
696 prevmessageid = data["message-id"][0]
697 if data["contentmd5"][0] == md5sum:
701 author = item["author"]
705 # create a basic email message
706 msg = MIMEMultipart("alternative")
# Message-ID: timestamp + random alphanumerics + local hostname (the
# surrounding string-building lines are partially missing here).
708 + datetime.datetime.now().strftime("%Y%m%d%H%M") \
712 string.ascii_letters + string.digits \
713 ) for a in range(0,6) \
714 ]) + "@" + socket.gethostname() + ">"
715 msg.add_header("Message-ID", messageid)
716 msg.set_unixfrom("\"%s\" <rss2maildir@localhost>" %(url))
717 msg.add_header("From", "\"%s\" <rss2maildir@localhost>" %(author))
718 msg.add_header("To", "\"%s\" <rss2maildir@localhost>" %(url))
# Thread updates of a previously-seen item under the original message.
720 msg.add_header("References", prevmessageid)
# Date: the item's updated_parsed timestamp when available, otherwise now.
721 createddate = datetime.datetime.now() \
722 .strftime("%a, %e %b %Y %T -0000")
724 createddate = datetime.datetime(*item["updated_parsed"][0:6]) \
725 .strftime("%a, %e %b %Y %T -0000")
728 msg.add_header("Date", createddate)
# Subject: run the (possibly HTML-escaped) title through HTML2Text.
729 subj_gen = HTML2Text()
730 title = item["title"].encode("utf-8")
731 title = re.sub(u'<', u'<', title)
732 title = re.sub(u'>', u'>', title)
734 msg.add_header("Subject", subj_gen.gettext())
735 msg.set_default_type("text/plain")
# HTML part: original content plus a trailing "Item URL" paragraph.
737 htmlcontent = content.encode("utf-8")
738 htmlcontent = "%s\n\n<p>Item URL: <a href='%s'>%s</a></p>" %( \
742 htmlpart = MIMEText(htmlcontent.encode("utf-8"), "html", "utf-8")
# Plain-text part: HTML2Text rendering plus the item URL.
743 textparser = HTML2Text()
744 textparser.feed(content.encode("utf-8"))
745 textcontent = textparser.gettext()
746 textcontent = "%s\n\nItem URL: %s" %( \
749 textpart = MIMEText(textcontent.encode("utf-8"), "plain", "utf-8")
753 # start by working out the filename we should be writting to, we do
754 # this following the normal maildir style rules
755 fname = str(os.getpid()) \
756 + "." + socket.gethostname() \
759 string.ascii_letters + string.digits \
760 ) for a in range(0,10) \
762 + datetime.datetime.now().strftime('%s')
763 fn = os.path.join(maildir, "tmp", fname)
765 fh.write(msg.as_string())
767 # now move it in to the new directory
768 newfn = os.path.join(maildir, "new", fname)
772 # now add to the database about the item
# Chain message-ids so future updates reference the whole thread.
774 messageid = prevmessageid + " " + messageid
# Record the item under its guid (when distinct from the link), and under
# its link, as urlencoded message-id/created/contentmd5 records.
775 if item.has_key("guid") and item["guid"] != item["link"]:
776 data = urllib.urlencode(( \
777 ("message-id", messageid), \
778 ("created", createddate), \
779 ("contentmd5", md5sum) \
781 db[url + "|" + item["guid"]] = data
783 data = db[url + "|" + item["link"]]
784 data = cgi.parse_qs(data)
# Preserve the original created date/md5 for the link record.
785 newdata = urllib.urlencode(( \
786 ("message-id", messageid), \
787 ("created", data["created"][0]), \
788 ("contentmd5", data["contentmd5"][0]) \
790 db[url + "|" + item["link"]] = newdata
792 db[url + "|" + item["link"]] = data
794 data = urllib.urlencode(( \
795 ("message-id", messageid), \
796 ("created", createddate), \
797 ("contentmd5", md5sum) \
799 db[url + "|" + item["link"]] = data
# Cache the feed-level validator headers for the next HEAD check.
803 for header in headers:
805 ["content-md5", "etag", "last-modified", "content-length"]:
806 data.append((header[0], header[1]))
808 data = urllib.urlencode(data)
# Entry point: parse options, locate a config file (-c flag, then
# ~/.rss2maildir.conf, then /etc/rss2maildir.conf), prepare the state
# directory and maildir root, then deliver every configured feed.
# NOTE(review): numbered excerpt — the try/except blocks around the
# os.stat/os.mkdir calls and the sys.exit calls after fatal errors are
# on lines not shown here.
814 if __name__ == "__main__":
815 # This only gets executed if we really called the program
816 # first off, parse the command line arguments
818 oparser = OptionParser()
820 "-c", "--conf", dest="conf",
821 help="location of config file"
824 "-s", "--statedir", dest="statedir",
825 help="location of directory to store state in"
828 (options, args) = oparser.parse_args()
830 # check for the configfile
# Explicit -c path must exist, otherwise bail out.
834 if options.conf != None:
835 # does the file exist?
837 os.stat(options.conf)
838 configfile = options.conf
840 # should exit here as the specified file doesn't exist
842 "Config file %s does not exist. Exiting.\n" %(options.conf,))
845 # check through the default locations
847 os.stat("%s/.rss2maildir.conf" %(os.environ["HOME"],))
848 configfile = "%s/.rss2maildir.conf" %(os.environ["HOME"],)
851 os.stat("/etc/rss2maildir.conf")
852 configfile = "/etc/rss2maildir.conf"
854 sys.stderr.write("No config file found. Exiting.\n")
857 # Right - if we've got this far, we've got a config file, now for the hard
860 scp = SafeConfigParser()
# Default maildir root, overridable via [general] maildir_root below.
863 maildir_root = "RSSMaildir"
# State dir precedence: -s flag, then [general] state_dir, then a default
# (set on a line not shown); each candidate is stat'd and created if
# missing.
866 if options.statedir != None:
867 state_dir = options.statedir
869 mode = os.stat(state_dir)[stat.ST_MODE]
870 if not stat.S_ISDIR(mode):
872 "State directory (%s) is not a directory\n" %(state_dir))
875 # try to make the directory
879 sys.stderr.write("Couldn't create statedir %s" %(state_dir))
881 elif scp.has_option("general", "state_dir"):
882 new_state_dir = scp.get("general", "state_dir")
884 mode = os.stat(new_state_dir)[stat.ST_MODE]
885 if not stat.S_ISDIR(mode):
# NOTE(review): this error reports state_dir but the path checked is
# new_state_dir — likely a bug in the original; doc-only edit, left as-is.
887 "State directory (%s) is not a directory\n" %(state_dir))
890 state_dir = new_state_dir
894 os.mkdir(new_state_dir)
895 state_dir = new_state_dir
898 "Couldn't create state directory %s\n" %(new_state_dir))
902 mode = os.stat(state_dir)[stat.ST_MODE]
903 if not stat.S_ISDIR(mode):
905 "State directory %s is not a directory\n" %(state_dir))
912 "State directory %s could not be created\n" %(state_dir))
# Maildir root from config, stat'd / created like the state dir.
915 if scp.has_option("general", "maildir_root"):
916 maildir_root = scp.get("general", "maildir_root")
919 mode = os.stat(maildir_root)[stat.ST_MODE]
920 if not stat.S_ISDIR(mode):
922 "Maildir Root %s is not a directory\n" \
927 os.mkdir(maildir_root)
929 sys.stderr.write("Couldn't create Maildir Root %s\n" \
# Every config section except [general] is a feed URL.
933 feeds = scp.sections()
935 feeds.remove("general")
939 for section in feeds:
940 # check if the directory exists
# Per-feed maildir name: the "maildir" option if set, else derived from
# the section name; urlencode makes it filesystem-safe.
943 maildir = scp.get(section, "maildir")
947 maildir = urllib.urlencode(((section, maildir),)).split("=")[1]
948 maildir = os.path.join(maildir_root, maildir)
# Validate/create the maildir's cur/tmp/new subdirectories.
951 exists = os.stat(maildir)
952 if stat.S_ISDIR(exists[stat.ST_MODE]):
953 # check if there's a new, cur and tmp directory
955 mode = os.stat(os.path.join(maildir, "cur"))[stat.ST_MODE]
957 os.mkdir(os.path.join(maildir, "cur"))
958 if not stat.S_ISDIR(mode):
959 sys.stderr.write("Broken maildir: %s\n" %(maildir))
961 mode = os.stat(os.path.join(maildir, "tmp"))[stat.ST_MODE]
963 os.mkdir(os.path.join(maildir, "tmp"))
964 if not stat.S_ISDIR(mode):
965 sys.stderr.write("Broken maildir: %s\n" %(maildir))
967 mode = os.stat(os.path.join(maildir, "new"))[stat.ST_MODE]
968 if not stat.S_ISDIR(mode):
969 sys.stderr.write("Broken maildir: %s\n" %(maildir))
971 os.mkdir(os.path.join(maildir, "new"))
973 sys.stderr.write("Broken maildir: %s\n" %(maildir))
978 sys.stderr.write("Couldn't create root maildir %s\n" \
982 os.mkdir(os.path.join(maildir, "new"))
983 os.mkdir(os.path.join(maildir, "cur"))
984 os.mkdir(os.path.join(maildir, "tmp"))
987 "Couldn't create required maildir directories for %s\n" \
991 # right - we've got the directories, we've got the section, we know the
994 parse_and_deliver(maildir, section, state_dir)