    # Tags that open a list container. handle_curdata() deliberately emits
    # nothing for these (see the `pass` branch there): only their li/dt/dd
    # children produce output text.
    liststarttags = [
        "ul",
        "ol",
        "dl",
    ]

    # Block-level tags that may legitimately contain other block/flow
    # content, so opening a block inside them should not close them.
    # NOTE(review): not referenced anywhere in the code visible in this
    # chunk - presumably consumed by nesting logic elsewhere; confirm.
    cancontainflow = [
        "div",
        "li",
        "dd",
        "blockquote",
    ]
+
    def __init__(self,textwidth=70):
        """Build an HTML-to-plain-text converter.

        textwidth: column at which output text is wrapped (default 70).
        """
        self.text = u''          # accumulated, formatted output
        self.curdata = u''       # raw character data for the tag being parsed
        self.textwidth = textwidth
        self.opentags = []       # stack of currently-open tag names
        self.indentlevel = 0     # NOTE(review): never updated in the code shown here
        HTMLParser.__init__(self)
+
    def handle_starttag(self, tag, attrs):
        """Flush pending data and push the new tag onto the open-tag stack.

        NOTE(review): nesting below was reconstructed from a
        whitespace-damaged patch; confirm the exact branch structure
        against the upstream source.
        """
        tag_name = tag.lower()
        if tag_name in self.blockleveltags:
            # handle starting a new block - unless we're in a block element
            # that can contain other blocks, we'll assume that we want to close
            # the container
            if tag_name == u'br':
                # NOTE(review): the append/pop pair leaves the stack
                # unchanged, and handle_curdata() runs *before* the append,
                # so the pushed 'br' is never observed by it - looks like a
                # no-op beyond the flush; confirm intent.
                self.handle_curdata()
                self.opentags.append(tag_name)
                self.opentags.pop()

        if len(self.opentags) > 0:
            # replace the innermost open tag with the new one: flush the
            # old tag's data first so it is formatted under the old tag
            self.handle_curdata()
            self.opentags.pop()
            self.opentags.append(tag_name)
        else:
            self.handle_curdata()
            self.opentags.append(tag_name)
+
+ def handle_startendtag(self, tag, attrs):
+ if tag.lower() == u'br':
+ self.tags.append(u'br')
+ self.handle_curdata() # just handle the data, don't do anything else
+ self.tags.pop()
+
+ def handle_curdata(self):
+ if len(self.opentags) == 0:
+ return
+
+ if len(self.curdata) == 0:
+ return
+
+ tag_thats_done = self.opentags[-1]
+
+ if tag_thats_done in self.blockleveltags:
+ newlinerequired = self.text != u''
+ if newlinerequired:
+ self.text = self.text + u'\n\n'
+
+ if tag_thats_done in ["h1", "h2", "h3", "h4", "h5", "h6"]:
+ underline = u''
+ underlinechar = u'='
+ headingtext = self.curdata.encode("utf-8").strip()
+ headingtext = u'\n'.join( \
+ textwrap.wrap(headingtext, self.textwidth))
+
+ if tag_thats_done == u'h2':
+ underlinechar = u'-'
+ elif tag_thats_done != u'h1':
+ underlinechar = u'~'
+
+ if u'\n' in headingtext:
+ underline = underlinechar * self.textwidth
+ else:
+ underline = underlinechar * len(headingtext)
+ self.text = self.text \
+ + headingtext.encode("utf-8") + u'\n' \
+ + underline
+ elif tag_thats_done == "p":
+ paragraph = self.curdata.encode("utf-8").strip()
+ self.text = self.text \
+ + u'\n'.join(textwrap.wrap(paragraph, self.textwidth))
+ elif tag_thats_done == "pre":
+ self.text = self.text + self.curdata
+ elif tag_thats_done == "blockquote":
+ quote = self.curdata.encode("utf-8").strip()
+ self.text = self.text \
+ + u'> ' \
+ + u'> '.join(textwrap.wrap(quote, self.textwidth - 2))
+ elif tag_thats_done == "li":
+ item = self.curdata.encode("utf-8").strip()
+ if len(self.text) > 0 and self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ self.text = self.text \
+ + u' * ' \
+ + u'\n '.join( \
+ textwrap.wrap(item, self.textwidth - 3))
+ self.curdata = u''
+ elif tag_thats_done == "dt":
+ definition = self.curdata.encode("utf-8").strip()
+ if len(self.text) > 0 and self.text[-1] != u'\n':
+ self.text = self.text + u'\n\n'
+ elif len(self.text) > 0 and self.text[-2] != u'\n':
+ self.text = self.text + u'\n'
+ definition = definition + "::"
+ self.text = self.text \
+ + '\n '.join(
+ textwrap.wrap(definition, self.textwidth - 1))
+ self.curdata = u''
+ elif tag_thats_done == "dd":
+ definition = self.curdata.encode("utf-8").strip()
+ if len(self.text) > 0 and self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ self.text = self.text \
+ + ' ' \
+ + '\n '.join( \
+ textwrap.wrap(definition, self.textwidth - 4))
+ self.curdata = u''
+ elif tag_thats_done in self.liststarttags:
+ pass
+ else:
+ # we've got no idea what this tag does, so we'll
+ # make an assumption that we're not going to know later
+ if len(self.curdata) > 0:
+ self.text = self.text \
+ + u' ... ' \
+ + u'\n ... '.join( \
+ textwrap.wrap(self.curdata, self.textwidth - 5))
+ self.curdata = u''
+
+ if tag_thats_done in self.blockleveltags:
+ self.curdata = u''
+
    def handle_endtag(self, tag):
        """Close an open tag: flush pending data, then unwind the tag stack.

        NOTE(review): nesting below was reconstructed from a
        whitespace-damaged patch; confirm the exact branch structure
        against the upstream source.
        """
        try:
            tagindex = self.opentags.index(tag)
        except:
            # closing tag we know nothing about.
            # err. weird.
            tagindex = 0

        # advance to the LAST occurrence of this tag on the stack, i.e.
        # close the innermost matching element (the `in` test on the
        # remaining slice guarantees index() succeeds, so the loop ends)
        while tagindex < len(self.opentags) \
            and tag in self.opentags[tagindex+1:]:
            try:
                tagindex = self.opentags.index(tag, tagindex+1)
            except:
                # well, we don't want to do that then
                pass
        if tagindex != len(self.opentags) - 1:
            # Assuming the data was for the last opened tag first
            self.handle_curdata()
        # Now kill the list to be a slice before this tag was opened
        self.opentags = self.opentags[:tagindex]
+
    def handle_data(self, data):
        """Accumulate character data for the currently-open tag.

        Python 2 idiom: the parser feeds UTF-8 bytestrings, which are
        decoded to unicode before being appended to self.curdata.
        """
        self.curdata = self.curdata + unicode(data, "utf-8")
+
+ def handle_entityref(self, name):
+ entity = name
+ if HTML2Text.entities.has_key(name.lower()):
+ entity = HTML2Text.entities[name.lower()]
+ elif name[0] == "#":
+ entity = unichr(int(name[1:]))
+ else:
+ entity = "&" + name + ";"
+
+ self.curdata = self.curdata + unicode(entity, "utf-8")
+
+ def gettext(self):
+ self.handle_curdata()
+ if len(self.text) == 0 or self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ self.opentags = []
+ if len(self.text) > 0:
+ while len(self.text) > 1 and self.text[-1] == u'\n':
+ self.text = self.text[:-1]
+ self.text = self.text + u'\n'
+ return self.text
+
def open_url(method, url):
    """Fetch `url` with the given HTTP method, following up to 3 redirects.

    Returns the httplib response object on HTTP 200, or None once three
    attempts have been used up by redirects, other statuses or errors.

    Fixes vs. the original: the bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit - narrowed to ``except Exception`` while
    keeping the best-effort retry semantics; ``== None`` -> ``is None``.
    """
    redirectcount = 0
    while redirectcount < 3:
        (type, rest) = urllib.splittype(url)
        (host, path) = urllib.splithost(rest)
        (host, port) = urllib.splitport(host)
        if port is None:
            port = 80
        try:
            conn = httplib.HTTPConnection("%s:%s" %(host, port))
            conn.request(method, path)
            response = conn.getresponse()
            if response.status in [301, 302, 303, 307]:
                # pick up the redirect target; the next loop iteration
                # re-requests it (if no location header arrives, the same
                # url is simply retried)
                headers = response.getheaders()
                for header in headers:
                    if header[0] == "location":
                        url = header[1]
            elif response.status == 200:
                return response
        except Exception:
            # best-effort: count any network/parse failure as a spent
            # attempt and retry
            pass
        redirectcount = redirectcount + 1
    return None
+
+def parse_and_deliver(maildir, url, statedir):
+ feedhandle = None
+ headers = None
+ # first check if we know about this feed already
+ feeddb = dbm.open(os.path.join(statedir, "feeds"), "c")
+ if feeddb.has_key(url):
+ data = feeddb[url]
+ data = cgi.parse_qs(data)
+ response = open_url("HEAD", url)
+ headers = None
+ if response:
+ headers = response.getheaders()
+ ischanged = False
+ try:
+ for header in headers:
+ if header[0] == "content-length":
+ if header[1] != data["content-length"][0]:
+ ischanged = True
+ elif header[0] == "etag":
+ if header[1] != data["etag"][0]:
+ ischanged = True
+ elif header[0] == "last-modified":
+ if header[1] != data["last-modified"][0]:
+ ischanged = True
+ elif header[0] == "content-md5":
+ if header[1] != data["content-md5"][0]:
+ ischanged = True
+ except:
+ ischanged = True
+ if ischanged:
+ response = open_url("GET", url)
+ if response != None:
+ headers = response.getheaders()
+ feedhandle = response
+ else:
+ sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+ return
+ else:
+ return # don't need to do anything, nothings changed.
+ else:
+ response = open_url("GET", url)
+ if response != None:
+ headers = response.getheaders()
+ feedhandle = response
+ else:
+ sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+ return
+
+ fp = feedparser.parse(feedhandle)
+ db = dbm.open(os.path.join(statedir, "seen"), "c")
+ for item in fp["items"]:
+ # have we seen it before?
+ # need to work out what the content is first...
+
+ if item.has_key("content"):
+ content = item["content"][0]["value"]
+ else:
+ content = item["summary"]
+
+ md5sum = md5.md5(content.encode("utf-8")).hexdigest()
+
+ prevmessageid = None
+
+ # check if there's a guid too - if that exists and we match the md5,
+ # return
+ if item.has_key("guid"):
+ if db.has_key(url + "|" + item["guid"]):
+ data = db[url + "|" + item["guid"]]
+ data = cgi.parse_qs(data)
+ if data["contentmd5"][0] == md5sum:
+ continue
+
+ if db.has_key(url + "|" + item["link"]):
+ data = db[url + "|" + item["link"]]
+ data = cgi.parse_qs(data)
+ if data.has_key("message-id"):
+ prevmessageid = data["message-id"][0]
+ if data["contentmd5"][0] == md5sum:
+ continue
+
+ try:
+ author = item["author"]
+ except:
+ author = url
+
+ # create a basic email message
+ msg = MIMEMultipart("alternative")
+ messageid = "<" \
+ + datetime.datetime.now().strftime("%Y%m%d%H%M") \
+ + "." \
+ + "".join( \
+ [random.choice( \
+ string.ascii_letters + string.digits \
+ ) for a in range(0,6) \
+ ]) + "@" + socket.gethostname() + ">"
+ msg.add_header("Message-ID", messageid)
+ msg.set_unixfrom("\"%s\" <rss2maildir@localhost>" %(url))
+ msg.add_header("From", "\"%s\" <rss2maildir@localhost>" %(author))
+ msg.add_header("To", "\"%s\" <rss2maildir@localhost>" %(url))
+ if prevmessageid:
+ msg.add_header("References", prevmessageid)
+ createddate = datetime.datetime.now() \
+ .strftime("%a, %e %b %Y %T -0000")