+ return
+ tag = tag.lower()
+
+ if tag in [u'br', u'img']:
+ return
+
+ if tag == u'dl':
+ self.indentlevel = self.indentlevel - 4
+
+ if tag in self.liststarttags:
+ if tag in [u'ol', u'dl', u'ul', u'dd']:
+ self.handle_curdata()
+ # find if there was a previous list level
+ smalllist = self.opentags[:-1]
+ smalllist.reverse()
+ for prev_listtag in smalllist:
+ if prev_listtag in [u'ol', u'dl']:
+ self.indentlevel = self.indentlevel - 4
+ break
+ elif prev_listtag == u'ul':
+ self.indentlevel = self.indentlevel - 3
+ break
+
+ if tag == u'ol':
+ self.listcount = self.listcount[:-1]
+
+ while tagindex < len(self.opentags) \
+ and tag in self.opentags[tagindex+1:]:
+ try:
+ tagindex = self.opentags.index(tag, tagindex+1)
+ except:
+ # well, we don't want to do that then
+ pass
+ if tagindex != len(self.opentags) - 1:
+ # Assuming the data was for the last opened tag first
+ self.handle_curdata()
+ # Now kill the list to be a slice before this tag was opened
+ self.opentags = self.opentags[:tagindex + 1]
+ else:
+ self.handle_curdata()
+ if self.opentags[-1] == tag:
+ self.opentags.pop()
+
+ def handle_data(self, data):
+ if len(self.opentags) == 0:
+ self.opentags.append(u'p')
+ self.curdata = self.curdata + data.decode("utf-8")
+
+ def handle_charref(self, name):
+ try:
+ entity = unichr(int(name))
+ except:
+ if name[0] == 'x':
+ try:
+ entity = unichr(int('0%s' %(name,), 16))
+ except:
+ entity = u'#%s' %(name,)
+ else:
+ entity = u'#%s' %(name,)
+ self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
+ "utf-8")
+
+ def handle_entityref(self, name):
+ entity = name
+ if HTML2Text.entities.has_key(name):
+ entity = HTML2Text.entities[name]
+ else:
+ entity = "&" + name + ";"
+
+ self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
+ "utf-8")
+
    def gettext(self):
        """Flush pending data and return the accumulated plain text.

        Ensures the text ends in a newline, trims runs of trailing blank
        lines, then appends any collected link targets ("__ url" lines)
        and reST-style image substitution definitions.  Resets the open
        tag stack, url list and image map as a side effect.
        """
        self.handle_curdata()
        # guarantee the body ends with a newline before trimming
        if len(self.text) == 0 or self.text[-1] != u'\n':
            self.text = self.text + u'\n'
        self.opentags = []
        if len(self.text) > 0:
            # strip trailing newlines (always keeping at least one
            # character), then terminate with a single newline
            while len(self.text) > 1 and self.text[-1] == u'\n':
                self.text = self.text[:-1]
            self.text = self.text + u'\n'
        if len(self.urls) > 0:
            # append collected link targets, one "__ url" per line
            self.text = self.text + u'\n__ ' + u'\n__ '.join(self.urls) + u'\n'
            self.urls = []
        if len(self.images.keys()) > 0:
            # append reST image substitution definitions for each
            # image collected while parsing
            self.text = self.text + u'\n.. ' \
                + u'\n.. '.join( \
                ["|%s| image:: %s" %(a, self.images[a]["url"]) \
                for a in self.images.keys()]) + u'\n'
            self.images = {}
        return self.text
+
def open_url(method, url):
    """Request url with the given HTTP method, following up to 3 redirects.

    Returns the httplib response object on a 200, or None when the fetch
    failed, the redirect chain was too deep, or any other status came back.
    """
    redirectcount = 0
    while redirectcount < 3:
        # split the url into host/port/path by hand (Python 2 urllib)
        (scheme, rest) = urllib.splittype(url)
        (host, path) = urllib.splithost(rest)
        (host, port) = urllib.splitport(host)
        if port is None:
            port = 80
        try:
            conn = httplib.HTTPConnection("%s:%s" %(host, port))
            conn.request(method, path)
            response = conn.getresponse()
            if response.status in [301, 302, 303, 307]:
                # pick up the new location and go round the loop again
                headers = response.getheaders()
                for header in headers:
                    if header[0] == "location":
                        url = header[1]
            elif response.status == 200:
                return response
        except (httplib.HTTPException, socket.error):
            # transient network / protocol error: let the loop retry,
            # giving up after the redirect budget is spent
            pass
        redirectcount = redirectcount + 1
    return None
+
def parse_and_deliver(maildir, url, statedir):
    """Fetch the feed at url and deliver new/changed items into maildir.

    State lives in two dbm databases under statedir:
      - "feeds": per-feed HTTP validator headers (etag, last-modified,
        content-length, content-md5) used to skip unchanged feeds
      - "seen": per-item message-id / created / content-md5 records keyed
        by "feedurl|guid" and "feedurl|link"

    Writes messages maildir-style: into tmp/, then linked into new/.
    """
    feedhandle = None
    headers = None
    # first check if we know about this feed already
    feeddb = dbm.open(os.path.join(statedir, "feeds"), "c")
    if feeddb.has_key(url):
        # known feed: do a cheap HEAD request and compare the validator
        # headers against what we stored on the last successful fetch
        data = feeddb[url]
        data = cgi.parse_qs(data)
        response = open_url("HEAD", url)
        headers = None
        if response:
            headers = response.getheaders()
        ischanged = False
        try:
            for header in headers:
                if header[0] == "content-length":
                    if header[1] != data["content-length"][0]:
                        ischanged = True
                elif header[0] == "etag":
                    if header[1] != data["etag"][0]:
                        ischanged = True
                elif header[0] == "last-modified":
                    if header[1] != data["last-modified"][0]:
                        ischanged = True
                elif header[0] == "content-md5":
                    if header[1] != data["content-md5"][0]:
                        ischanged = True
        except:
            # headers is None (HEAD failed) or a stored key is missing -
            # play safe and treat the feed as changed
            ischanged = True
        if ischanged:
            response = open_url("GET", url)
            if response != None:
                headers = response.getheaders()
                feedhandle = response
            else:
                sys.stderr.write("Failed to fetch feed: %s\n" %(url))
                return
        else:
            return # don't need to do anything, nothings changed.
    else:
        # first time we've seen this feed - fetch it unconditionally
        response = open_url("GET", url)
        if response != None:
            headers = response.getheaders()
            feedhandle = response
        else:
            sys.stderr.write("Failed to fetch feed: %s\n" %(url))
            return

    fp = feedparser.parse(feedhandle)
    db = dbm.open(os.path.join(statedir, "seen"), "c")
    for item in fp["items"]:
        # have we seen it before?
        # need to work out what the content is first...

        if item.has_key("content"):
            content = item["content"][0]["value"]
        else:
            if item.has_key("description"):
                content = item["description"]
            else:
                content = u''

        # md5 of the rendered content detects changed items even when
        # the guid/link stays the same
        md5sum = md5.md5(content.encode("utf-8")).hexdigest()

        prevmessageid = None

        db_guid_key = None
        db_link_key = (url + u'|' + item["link"]).encode("utf-8")

        # check if there's a guid too - if that exists and we match the md5,
        # return
        if item.has_key("guid"):
            db_guid_key = (url + u'|' + item["guid"]).encode("utf-8")
            if db.has_key(db_guid_key):
                data = db[db_guid_key]
                data = cgi.parse_qs(data)
                if data["contentmd5"][0] == md5sum:
                    continue

        if db.has_key(db_link_key):
            data = db[db_link_key]
            data = cgi.parse_qs(data)
            if data.has_key("message-id"):
                # remember the previous message-id so the updated item
                # threads onto the original in the mail client
                prevmessageid = data["message-id"][0]
            if data["contentmd5"][0] == md5sum:
                continue

        try:
            author = item["author"]
        except:
            author = url

        # create a basic email message
        msg = MIMEMultipart("alternative")
        # message-id: timestamp . random junk @ hostname
        messageid = "<" \
            + datetime.datetime.now().strftime("%Y%m%d%H%M") \
            + "." \
            + "".join( \
                [random.choice( \
                    string.ascii_letters + string.digits \
                    ) for a in range(0,6) \
                ]) + "@" + socket.gethostname() + ">"
        msg.add_header("Message-ID", messageid)
        msg.set_unixfrom("\"%s\" <rss2maildir@localhost>" %(url))
        msg.add_header("From", "\"%s\" <rss2maildir@localhost>" %(author))
        msg.add_header("To", "\"%s\" <rss2maildir@localhost>" %(url))
        if prevmessageid:
            msg.add_header("References", prevmessageid)
        # prefer the item's own updated date; fall back to "now" when the
        # feed has no parseable date
        createddate = datetime.datetime.now() \
            .strftime("%a, %e %b %Y %T -0000")
        try:
            createddate = datetime.datetime(*item["updated_parsed"][0:6]) \
                .strftime("%a, %e %b %Y %T -0000")
        except:
            pass
        msg.add_header("Date", createddate)
        # strip any HTML out of the title for the Subject header
        subj_gen = HTML2Text()
        title = item["title"]
        # NOTE(review): the two substitutions below replace u'<' with
        # itself (no-ops); they look like an HTML-escaping artifact of
        # what was probably u'&lt;' -> u'<' and u'&gt;' -> u'>' -
        # confirm against the upstream source
        title = re.sub(u'<', u'<', title)
        title = re.sub(u'>', u'>', title)
        subj_gen.feed(title.encode("utf-8"))
        msg.add_header("Subject", subj_gen.gettext())
        msg.set_default_type("text/plain")

        # build the HTML part with the item link appended
        htmlcontent = content.encode("utf-8")
        htmlcontent = "%s\n\n<p>Item URL: <a href='%s'>%s</a></p>" %( \
            content, \
            item["link"], \
            item["link"] )
        htmlpart = MIMEText(htmlcontent.encode("utf-8"), "html", "utf-8")
        # and a plain-text alternative rendered through HTML2Text
        textparser = HTML2Text()
        textparser.feed(content.encode("utf-8"))
        textcontent = textparser.gettext()
        textcontent = "%s\n\nItem URL: %s" %( \
            textcontent, \
            item["link"] )
        textpart = MIMEText(textcontent.encode("utf-8"), "plain", "utf-8")
        msg.attach(textpart)
        msg.attach(htmlpart)

        # start by working out the filename we should be writting to, we do
        # this following the normal maildir style rules
        fname = str(os.getpid()) \
            + "." + socket.gethostname() \
            + "." + "".join( \
                [random.choice( \
                    string.ascii_letters + string.digits \
                    ) for a in range(0,10) \
                ]) + "." \
            + datetime.datetime.now().strftime('%s')
        fn = os.path.join(maildir, "tmp", fname)
        fh = open(fn, "w")
        fh.write(msg.as_string())
        fh.close()
        # now move it in to the new directory
        newfn = os.path.join(maildir, "new", fname)
        os.link(fn, newfn)
        os.unlink(fn)

        # now add to the database about the item
        if prevmessageid:
            # accumulate message-ids so future updates can reference the
            # whole thread
            messageid = prevmessageid + " " + messageid
        if item.has_key("guid") and item["guid"] != item["link"]:
            # guid differs from link: record under the guid key, and
            # refresh the link key if it already exists
            data = urllib.urlencode(( \
                ("message-id", messageid), \
                ("created", createddate), \
                ("contentmd5", md5sum) \
                ))
            db[db_guid_key] = data
            try:
                data = db[db_link_key]
                data = cgi.parse_qs(data)
                newdata = urllib.urlencode(( \
                    ("message-id", messageid), \
                    ("created", data["created"][0]), \
                    ("contentmd5", data["contentmd5"][0]) \
                    ))
                db[db_link_key] = newdata
            except:
                # no existing link record - store the fresh one
                db[db_link_key] = data
        else:
            data = urllib.urlencode(( \
                ("message-id", messageid), \
                ("created", createddate), \
                ("contentmd5", md5sum) \
                ))
            db[db_link_key] = data

    if headers:
        # store the validator headers so the next run can skip the feed
        # with a cheap HEAD comparison
        data = []
        for header in headers:
            if header[0] in \
                ["content-md5", "etag", "last-modified", "content-length"]:
                data.append((header[0], header[1]))
        if len(data) > 0:
            data = urllib.urlencode(data)
            feeddb[url] = data

    db.close()
    feeddb.close()
+
+if __name__ == "__main__":
+ # This only gets executed if we really called the program
+ # first off, parse the command line arguments
+
+ oparser = OptionParser()
+ oparser.add_option(
+ "-c", "--conf", dest="conf",
+ help="location of config file"
+ )
+ oparser.add_option(
+ "-s", "--statedir", dest="statedir",
+ help="location of directory to store state in"
+ )
+
+ (options, args) = oparser.parse_args()
+
+ # check for the configfile
+
+ configfile = None
+
+ if options.conf != None:
+ # does the file exist?
+ try:
+ os.stat(options.conf)
+ configfile = options.conf
+ except:
+ # should exit here as the specified file doesn't exist
+ sys.stderr.write( \
+ "Config file %s does not exist. Exiting.\n" %(options.conf,))