X-Git-Url: https://git.sommitrealweird.co.uk/rss2maildir.git/blobdiff_plain/d2619aa8bc21a7af97d2ae8b6d198a3e8c50aa33..aa4dd6a06ba1dd47704ac7dd6bcaa062673162d1:/rss2maildir.py
diff --git a/rss2maildir.py b/rss2maildir.py
index 2d26217..a9abe4d 100755
--- a/rss2maildir.py
+++ b/rss2maildir.py
@@ -17,10 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import mailbox
import sys
import os
import stat
+import httplib
import urllib
import feedparser
@@ -39,148 +39,659 @@ from optparse import OptionParser
from ConfigParser import SafeConfigParser
from base64 import b64encode
-import md5
+
+if sys.version_info[0] == 2 and sys.version_info[1] >= 6:
+ import hashlib as md5
+else:
+ import md5
import cgi
import dbm
-from HTMLParser import HTMLParser
+import re
-entities = {
- "amp": "&",
- "lt": "<",
- "gt": ">",
- "pound": "£",
- "copy": "©",
- "apos": "'",
- "quote": "\"",
- "nbsp": " ",
- }
+from HTMLParser import HTMLParser
class HTML2Text(HTMLParser):
-
- def __init__(self):
- self.inheadingone = False
- self.inheadingtwo = False
- self.inotherheading = False
- self.inparagraph = True
- self.inblockquote = False
- self.inlink = False
+ entities = {
+ u'amp': u'&',
+ u'lt': u'<',
+ u'gt': u'>',
+ u'pound': u'£',
+ u'copy': u'©',
+ u'apos': u'\'',
+ u'quot': u'"',
+ u'nbsp': u' ',
+ u'ldquo': u'"',
+ u'rdquo': u'"',
+ u'lsquo': u'‘',
+ u'rsquo': u'’',
+ u'laquo': u'«',
+ u'raquo': u'»',
+ u'lsaquo': u'‹',
+ u'rsaquo': u'›',
+ u'bull': u'•',
+ u'middot': u'·',
+ u'deg': u'°',
+ u'hellip': u'…',
+ u'trade': u'™',
+ u'reg': u'®',
+ u'agrave': u'à',
+ u'Agrave': u'À',
+ u'egrave': u'è',
+ u'Egrave': u'È',
+ u'igrave': u'ì',
+ u'Igrave': u'Ì',
+ u'ograve': u'ò',
+ u'Ograve': u'Ò',
+ u'ugrave': u'ù',
+ u'Ugrave': u'Ù',
+ u'aacute': u'á',
+ u'Aacute': u'Á',
+ u'eacute': u'é',
+ u'Eacute': u'É',
+ u'iacute': u'í',
+ u'Iacute': u'Í',
+ u'oacute': u'ó',
+ u'Oacute': u'Ó',
+ u'uacute': u'ú',
+ u'Uacute': u'Ú',
+ u'yacute': u'ý',
+ u'Yacute': u'Ý',
+ u'acirc': u'â',
+ u'Acirc': u'Â',
+ u'ecirc': u'ê',
+ u'Ecirc': u'Ê',
+ u'icirc': u'î',
+ u'Icirc': u'Î',
+ u'ocirc': u'ô',
+ u'Ocirc': u'Ô',
+ u'ucirc': u'û',
+ u'Ucirc': u'Û',
+ u'atilde': u'ã',
+ u'Atilde': u'Ã',
+ u'ntilde': u'ñ',
+ u'Ntilde': u'Ñ',
+ u'otilde': u'õ',
+ u'Otilde': u'Õ',
+ u'auml': u'ä',
+ u'Auml': u'Ä',
+ u'euml': u'ë',
+ u'Euml': u'Ë',
+ u'iuml': u'ï',
+ u'Iuml': u'Ï',
+ u'ouml': u'ö',
+ u'Ouml': u'Ö',
+ u'uuml': u'ü',
+ u'Uuml': u'Ü',
+ u'yuml': u'ÿ',
+ u'Yuml': u'Ÿ',
+ u'iexcl': u'¡',
+ u'iquest': u'¿',
+ u'ccedil': u'ç',
+ u'Ccedil': u'Ç',
+ u'oelig': u'œ',
+ u'OElig': u'Œ',
+ u'szlig': u'ß',
+ u'oslash': u'ø',
+ u'Oslash': u'Ø',
+ u'aring': u'å',
+ u'Aring': u'Å',
+ u'aelig': u'æ',
+ u'AElig': u'Æ',
+ u'thorn': u'þ',
+ u'THORN': u'Þ',
+ u'eth': u'ð',
+ u'ETH': u'Ð',
+ u'mdash': u'—',
+ u'ndash': u'–',
+ u'sect': u'§',
+ u'para': u'¶',
+ u'uarr': u'↑',
+ u'darr': u'↓',
+ u'larr': u'←',
+ u'rarr': u'→',
+ u'dagger': u'†',
+ u'Dagger': u'‡',
+ u'permil': u'‰',
+ u'prod': u'∏',
+ u'infin': u'∞',
+ u'radic': u'√',
+ u'there4': u'∴',
+ u'int': u'∫',
+ u'asymp': u'≈',
+ u'ne': u'≠',
+ u'equiv': u'≡',
+ u'le': u'≤',
+ u'ge': u'≥',
+ u'loz': u'◊',
+ u'sum': u'∑',
+ u'part': u'∂',
+ u'prime': u'′',
+ u'Prime': u'″',
+ u'harr': u'↔',
+ u'micro': u'µ',
+ u'not': u'¬',
+ u'plusmn': u'±',
+ u'divide': u'÷',
+ u'cent': u'¢',
+ u'euro': u'€',
+ }
+
+ blockleveltags = [
+ u'h1',
+ u'h2',
+ u'h3',
+ u'h4',
+ u'h5',
+ u'h6',
+ u'pre',
+ u'p',
+ u'ul',
+ u'ol',
+ u'dl',
+ u'li',
+ u'dt',
+ u'dd',
+ u'div',
+ u'blockquote',
+ ]
+
+ liststarttags = [
+ u'ul',
+ u'ol',
+ u'dl',
+ ]
+
+ cancontainflow = [
+ u'div',
+ u'li',
+ u'dd',
+ u'blockquote',
+ ]
+
+ def __init__(self,textwidth=70):
self.text = u''
- self.currentparagraph = u''
- self.headingtext = u''
- self.blockquote = u''
- self.inpre = False
+ self.curdata = u''
+ self.textwidth = textwidth
+ self.opentags = []
+ self.indentlevel = 0
+ self.ignorenodata = False
+ self.listcount = []
+ self.urls = []
+ self.images = {}
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
- if tag.lower() == "h1":
- self.inheadingone = True
- self.inparagraph = False
- elif tag.lower() == "h2":
- self.inheadingtwo = True
- self.inparagraph = False
- elif tag.lower() in ["h3", "h4", "h5", "h6"]:
- self.inotherheading = True
- self.inparagraph = False
- elif tag.lower() == "a":
- self.inlink = True
- elif tag.lower() == "br":
- if self.inparagraph:
- self.text = self.text + "\n".join(textwrap.wrap(self.currentparagraph, 70)).encode('utf-8') + "\n"
- self.currentparagraph = ""
- elif self.inblockquote:
- self.text = self.text + "\n> " + "\n> ".join([a.strip() for a in textwrap.wrap(self.blockquote, 68)]).encode("utf-8") + "\n"
- self.blockquote = u''
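+ # Block-level tags flush any buffered text and become the new
+ # innermost open tag; anchors, images and line breaks get inline
+ # handling below, and unknown inline tags are ignored.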
+ tag_name = tag.lower()
+ if tag_name in self.blockleveltags:
+ # handle starting a new block - unless we're in a block element
+ # that can contain other blocks, we'll assume that we want to close
+ # the container
+ if len(self.opentags) > 1 and self.opentags[-1] == u'li':
+ self.handle_curdata()
+
+ if tag_name == u'ol':
+ self.handle_curdata()
+ self.listcount.append(1)
+ self.listlevel = len(self.listcount) - 1
+
+ if tag_name == u'dl':
+ self.indentlevel = self.indentlevel + 4
+
+ if tag_name in self.liststarttags:
+ smallist = self.opentags[-3:-1]
+ smallist.reverse()
+ for prev_listtag in smallist:
+ if prev_listtag in [u'dl', u'ol']:
+ self.indentlevel = self.indentlevel + 4
+ break
+ elif prev_listtag == u'ul':
+ self.indentlevel = self.indentlevel + 3
+ break
+
+ if len(self.opentags) > 0:
+ self.handle_curdata()
+ if tag_name not in self.cancontainflow:
+ self.opentags.pop()
+ self.opentags.append(tag_name)
+ else:
+ if tag_name == "span":
+ return
+ listcount = 0
+ try:
+ listcount = self.listcount[-1]
+ except:
+ pass
+
+ if tag_name == u'dd' and len(self.opentags) > 1 \
+ and self.opentags[-1] == u'dt':
+ self.handle_curdata()
+ self.opentags.pop()
+ elif tag_name == u'dt' and len(self.opentags) > 1 \
+ and self.opentags[-1] == u'dd':
+ self.handle_curdata()
+ self.opentags.pop()
+ elif tag_name == u'a':
+ for attr in attrs:
+ if attr[0].lower() == u'href':
+ self.urls.append(attr[1].decode('utf-8'))
+ self.curdata = self.curdata + u'`'
+ self.opentags.append(tag_name)
+ return
+ elif tag_name == u'img':
+ self.handle_image(attrs)
+ return
+ elif tag_name == u'br':
+ self.handle_br()
+ return
else:
- self.text = self.text + "\n"
- elif tag.lower() == "blockquote":
- self.inblockquote = True
- self.text = self.text + "\n"
- elif tag.lower() == "p":
- if self.text != "":
- self.text = self.text + "\n\n"
- if self.inparagraph:
- self.text = self.text + "\n".join(textwrap.wrap(self.currentparagraph, 70)).encode("utf-8")
- self.currentparagraph = u''
- self.inparagraph = True
- elif tag.lower() == "pre":
- self.text = self.text + "\n"
- self.inpre = True
- self.inparagraph = False
- self.inblockquote = False
+ # we don't know the tag, so let's avoid handling it
+ return
def handle_startendtag(self, tag, attrs):
- if tag.lower() == "br":
- if self.inparagraph:
- self.text = self.text + "\n".join(textwrap.wrap(self.currentparagraph, 70)).encode("utf-8") + "\n"
- self.currentparagraph = u''
- elif self.inblockquote:
- self.text = self.text + "\n> " + "\n> ".join([a.strip() for a in textwrap.wrap(self.blockquote, 68)]).encode("utf-8") + "\n"
- self.blockquote = ""
+ if tag.lower() == u'br':
+ self.handle_br()
+ elif tag.lower() == u'img':
+ self.handle_image(attrs)
+ return
+
+ def handle_br(self):
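+ # Treat a line break as a flush: emit any buffered text, then call
+ # handle_curdata() again with a temporary 'br' tag on the stack so a
+ # newline is appended to the output.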
+ self.handle_curdata()
+ self.opentags.append(u'br')
+ self.handle_curdata()
+ self.opentags.pop()
+
+ def handle_image(self, attrs):
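+ # Record the image URL keyed by its alt text (or by the URL itself)
+ # and emit a reST-style |name| substitution reference; gettext()
+ # appends the matching ".. |name| image:: url" directives at the end.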
+ alt = u''
+ url = u''
+ for attr in attrs:
+ if attr[0] == 'alt':
+ if isinstance(attr[1], str):
+ alt = u'%s' %(attr[1].decode("utf-8"))
+ else:
+ alt = attr[1]
+ elif attr[0] == 'src':
+ if isinstance(attr[1], str):
+ url = u'%s' %(attr[1].decode("utf-8"))
+ else:
+ url = attr[1]
+ if url:
+ if alt:
+ if self.images.has_key(alt):
+ if self.images[alt]["url"] == url:
+ self.curdata = self.curdata \
+ + u'|%s|' %(alt,)
+ else:
+ while self.images.has_key(alt):
+ alt = alt + "_"
+ self.images[alt] = {"url": url}
+ self.curdata = self.curdata \
+ + u'|%s|' %(alt,)
+ else:
+ self.images[alt] = {"url": url}
+ self.curdata = self.curdata \
+ + u'|%s|' %(alt,)
+ else:
+ if self.images.has_key(url):
+ self.curdata = self.curdata \
+ + u'|%s|' %(url,)
+ else:
+ self.images[url] = {}
+ self.images[url]["url"] =url
+ self.curdata = self.curdata \
+ + u'|%s|' %(url,)
+
+ def handle_curdata(self):
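+ # Render the text buffered in self.curdata according to the innermost
+ # open tag: headings get underlined, paragraphs, blockquotes and list
+ # items are re-wrapped to textwidth with suitable indentation and
+ # markers, and preformatted text is passed through untouched.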
+
+ if len(self.opentags) == 0:
+ return
+
+ tag_thats_done = self.opentags[-1]
+
+ if len(self.curdata) == 0:
+ return
+
+ if tag_thats_done == u'br':
+ if len(self.text) == 0 or self.text[-1] != '\n':
+ self.text = self.text + '\n'
+ self.ignorenodata = True
+ return
+
+ if len(self.curdata.strip()) == 0:
+ return
+
+ if tag_thats_done in self.blockleveltags:
+ newlinerequired = self.text != u''
+ if self.ignorenodata:
+ newlinerequired = False
+ self.ignorenodata = False
+ if newlinerequired:
+ if tag_thats_done in [u'dt', u'dd', u'li'] \
+ and len(self.text) > 1 \
+ and self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ elif len(self.text) > 2 \
+ and self.text[-1] != u'\n' \
+ and self.text[-2] != u'\n':
+ self.text = self.text + u'\n\n'
+
+ if tag_thats_done in ["h1", "h2", "h3", "h4", "h5", "h6"]:
+ underline = u''
+ underlinechar = u'='
+ headingtext = " ".join(self.curdata.split())
+ seperator = u'\n' + u' '*self.indentlevel
+ headingtext = seperator.join( \
+ textwrap.wrap( \
+ headingtext, \
+ self.textwidth - self.indentlevel \
+ ) \
+ )
+
+ if tag_thats_done == u'h2':
+ underlinechar = u'-'
+ elif tag_thats_done != u'h1':
+ underlinechar = u'~'
+
+ if u'\n' in headingtext:
+ underline = u' ' * self.indentlevel \
+ + underlinechar * (self.textwidth - self.indentlevel)
else:
- self.text = self.text + "\n"
+ underline = u' ' * self.indentlevel \
+ + underlinechar * len(headingtext)
+ self.text = self.text \
+ + headingtext + u'\n' \
+ + underline
+ elif tag_thats_done in [u'p', u'div']:
+ paragraph = unicode( \
+ " ".join(self.curdata.strip().encode("utf-8").split()), \
+ "utf-8")
+ seperator = u'\n' + u' ' * self.indentlevel
+ self.text = self.text \
+ + u' ' * self.indentlevel \
+ + seperator.join( \
+ textwrap.wrap( \
+ paragraph, self.textwidth - self.indentlevel))
+ elif tag_thats_done == "pre":
+ self.text = self.text + unicode( \
+ self.curdata.encode("utf-8"), "utf-8")
+ elif tag_thats_done == u'blockquote':
+ quote = unicode( \
+ " ".join(self.curdata.encode("utf-8").strip().split()), \
+ "utf-8")
+ seperator = u'\n' + u' ' * self.indentlevel + u' '
+ if len(self.text) > 0 and self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ self.text = self.text \
+ + u' ' \
+ + seperator.join( \
+ textwrap.wrap( \
+ quote, \
+ self.textwidth - self.indentlevel - 2 \
+ )
+ )
+ self.curdata = u''
+ elif tag_thats_done == "li":
+ item = unicode(self.curdata.encode("utf-8").strip(), "utf-8")
+ if len(self.text) > 0 and self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ # work out if we're in an ol rather than a ul
+ latesttags = self.opentags[-4:]
+ latesttags.reverse()
+ isul = None
+ for thing in latesttags:
+ if thing == 'ul':
+ isul = True
+ break
+ elif thing == 'ol':
+ isul = False
+ break
+
+ listindent = 3
+ if not isul:
+ listindent = 4
+
+ listmarker = u' * '
+ if isul == False:
+ listmarker = u' %2d. ' %(self.listcount[-1])
+ self.listcount[-1] = self.listcount[-1] + 1
+
+ seperator = u'\n' \
+ + u' ' * self.indentlevel \
+ + u' ' * listindent
+ self.text = self.text \
+ + u' ' * self.indentlevel \
+ + listmarker \
+ + seperator.join( \
+ textwrap.wrap( \
+ item, \
+ self.textwidth - self.indentlevel - listindent \
+ ) \
+ )
+ self.curdata = u''
+ elif tag_thats_done == u'dt':
+ definition = unicode(" ".join( \
+ self.curdata.encode("utf-8").strip().split()), \
+ "utf-8")
+ if len(self.text) > 0 and self.text[-1] != u'\n':
+ self.text = self.text + u'\n\n'
+ elif len(self.text) > 1 and self.text[-2] != u'\n':
+ self.text = self.text + u'\n'
+ definition = u' ' * (self.indentlevel - 4) + definition + "::"
+ indentstring = u'\n' + u' ' * (self.indentlevel - 3)
+ self.text = self.text \
+ + indentstring.join(
+ textwrap.wrap(definition, \
+ self.textwidth - self.indentlevel - 4))
+ self.curdata = u''
+ elif tag_thats_done == u'dd':
+ definition = unicode(" ".join( \
+ self.curdata.encode("utf-8").strip().split()),
+ "utf-8")
+ if len(definition) > 0:
+ if len(self.text) > 0 and self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ indentstring = u'\n' + u' ' * self.indentlevel
+ self.text = self.text \
+ + indentstring \
+ + indentstring.join( \
+ textwrap.wrap( \
+ definition, \
+ self.textwidth - self.indentlevel \
+ ) \
+ )
+ self.curdata = u''
+ elif tag_thats_done == u'a':
+ self.curdata = self.curdata + u'`__'
+ pass
+ elif tag_thats_done in self.liststarttags:
+ pass
+
+ if tag_thats_done in self.blockleveltags:
+ self.curdata = u''
+
+ self.ignorenodata = False
def handle_endtag(self, tag):
- if tag.lower() == "h1":
- self.inheadingone = False
- self.text = self.text + "\n\n" + self.headingtext + "\n" + "=" * len(self.headingtext.strip())
- self.headingtext = u''
- elif tag.lower() == "h2":
- self.inheadingtwo = False
- self.text = self.text + "\n\n" + self.headingtext + "\n" + "-" * len(self.headingtext.strip())
- self.headingtext = u''
- elif tag.lower() in ["h3", "h4", "h5", "h6"]:
- self.inotherheading = False
- self.text = self.text + "\n\n" + self.headingtext + "\n" + "~" * len(self.headingtext.strip())
- self.headingtext = u''
- elif tag.lower() == "p":
- self.text = self.text + "\n".join(textwrap.wrap(self.currentparagraph, 70))
- self.inparagraph = False
- elif tag.lower() == "blockquote":
- self.text = self.text + "\n> " + "\n> ".join([a.strip() for a in textwrap.wrap(self.blockquote, 68)]).encode("utf-8") + "\n"
- self.inblockquote = False
- self.blockquote = u''
- elif tag.lower() == "pre":
- self.inpre = False
+ self.ignorenodata = False
+ if tag == "span":
+ return
- def handle_data(self, data):
- if self.inheadingone or self.inheadingtwo or self.inotherheading:
- self.headingtext = self.headingtext + unicode(data, "utf-8").strip() + u' '
- elif self.inblockquote:
- self.blockquote = self.blockquote + unicode(data, "utf-8").strip() + u' '
- elif self.inparagraph:
- self.currentparagraph = self.currentparagraph + unicode(data, "utf-8").strip() + u' '
- elif self.inpre:
- self.text = self.text + data.encode("utf-8")
+ try:
+ tagindex = self.opentags.index(tag)
+ except:
+ return
+ tag = tag.lower()
+
+ if tag in [u'br', u'img']:
+ return
+
+ if tag == u'dl':
+ self.indentlevel = self.indentlevel - 4
+
+ if tag in self.liststarttags:
+ if tag in [u'ol', u'dl', u'ul', u'dd']:
+ self.handle_curdata()
+ # find if there was a previous list level
+ smalllist = self.opentags[:-1]
+ smalllist.reverse()
+ for prev_listtag in smalllist:
+ if prev_listtag in [u'ol', u'dl']:
+ self.indentlevel = self.indentlevel - 4
+ break
+ elif prev_listtag == u'ul':
+ self.indentlevel = self.indentlevel - 3
+ break
+
+ if tag == u'ol':
+ self.listcount = self.listcount[:-1]
+
+ while tagindex < len(self.opentags) \
+ and tag in self.opentags[tagindex+1:]:
+ try:
+ tagindex = self.opentags.index(tag, tagindex+1)
+ except:
+ # well, we don't want to do that then
+ pass
+ if tagindex != len(self.opentags) - 1:
+ # Assume the buffered data belongs to the most recently opened tag
+ self.handle_curdata()
+ # Now drop everything that was opened after this tag
+ self.opentags = self.opentags[:tagindex + 1]
else:
- self.text = self.text + unicode(data, "utf-8").strip() + u' '
+ self.handle_curdata()
+ if self.opentags[-1] == tag:
+ self.opentags.pop()
+
+ def handle_data(self, data):
+ if len(self.opentags) == 0:
+ self.opentags.append(u'p')
+ self.curdata = self.curdata + data.decode("utf-8")
+
+ def handle_charref(self, name):
+ try:
+ entity = unichr(int(name))
+ except:
+ if name[0] == 'x':
+ try:
+ entity = unichr(int('0%s' %(name,), 16))
+ except:
+ entity = u'#%s' %(name,)
+ else:
+ entity = u'#%s' %(name,)
+ self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
+ "utf-8")
def handle_entityref(self, name):
entity = name
- if entities.has_key(name.lower()):
- entity = entities[name.lower()]
- elif name[0] == "#":
- entity = unichr(int(name[1:]))
+ if HTML2Text.entities.has_key(name):
+ entity = HTML2Text.entities[name]
else:
entity = "&" + name + ";"
- if self.inparagraph:
- self.currentparagraph = self.currentparagraph + entity
- elif self.inblockquote:
- self.blockquote = self.blockquote + entity
- else:
- self.text = self.text + entity
+ self.curdata = self.curdata + unicode(entity.encode('utf-8'), \
+ "utf-8")
def gettext(self):
- data = self.text
- if self.inparagraph:
- data = data + "\n".join(textwrap.wrap(self.currentparagraph, 70))
- return data
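+ # Flush any remaining buffered text, trim trailing blank lines, then
+ # append the collected link targets ("__ url") and image definitions
+ # (".. |name| image:: url") in reST style.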
+ self.handle_curdata()
+ if len(self.text) == 0 or self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ self.opentags = []
+ if len(self.text) > 0:
+ while len(self.text) > 1 and self.text[-1] == u'\n':
+ self.text = self.text[:-1]
+ self.text = self.text + u'\n'
+ if len(self.urls) > 0:
+ self.text = self.text + u'\n__ ' + u'\n__ '.join(self.urls) + u'\n'
+ self.urls = []
+ if len(self.images.keys()) > 0:
+ self.text = self.text + u'\n.. ' \
+ + u'\n.. '.join( \
+ ["|%s| image:: %s" %(a, self.images[a]["url"]) \
+ for a in self.images.keys()]) + u'\n'
+ self.images = {}
+ return self.text
+
+def open_url(method, url):
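+# Minimal HTTP/HTTPS fetcher: issue the given method against the URL,
+# follow up to three redirects (301/302/303/307) and return the httplib
+# response on a 200, or None on failure.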
+ redirectcount = 0
+ while redirectcount < 3:
+ (type, rest) = urllib.splittype(url)
+ (host, path) = urllib.splithost(rest)
+ (host, port) = urllib.splitport(host)
+ if type == "https":
+ if port == None:
+ port = 443
+ elif port == None:
+ port = 80
+ try:
+ conn = None
+ if type == "http":
+ conn = httplib.HTTPConnection("%s:%s" %(host, port))
+ else:
+ conn = httplib.HTTPSConnection("%s:%s" %(host, port))
+ conn.request(method, path)
+ response = conn.getresponse()
+ if response.status in [301, 302, 303, 307]:
+ headers = response.getheaders()
+ for header in headers:
+ if header[0] == "location":
+ url = header[1]
+ elif response.status == 200:
+ return response
+ except:
+ pass
+ redirectcount = redirectcount + 1
+ return None
def parse_and_deliver(maildir, url, statedir):
- md = mailbox.Maildir(maildir)
- fp = feedparser.parse(url)
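+ # Fetch the feed (skipping it entirely if the cached HTTP validators
+ # say it hasn't changed), then deliver each previously unseen item
+ # into the maildir as a multipart text+html message and record it in
+ # the "seen" database.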
+ feedhandle = None
+ headers = None
+ # first check if we know about this feed already
+ feeddb = dbm.open(os.path.join(statedir, "feeds"), "c")
+ if feeddb.has_key(url):
+ data = feeddb[url]
+ data = cgi.parse_qs(data)
+ response = open_url("HEAD", url)
+ headers = None
+ if response:
+ headers = response.getheaders()
+ ischanged = False
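+ # compare the cached validators (content-length, etag, last-modified
+ # and content-md5) against the HEAD response; any mismatch means the
+ # feed needs a full GET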
+ try:
+ for header in headers:
+ if header[0] == "content-length":
+ if header[1] != data["content-length"][0]:
+ ischanged = True
+ elif header[0] == "etag":
+ if header[1] != data["etag"][0]:
+ ischanged = True
+ elif header[0] == "last-modified":
+ if header[1] != data["last-modified"][0]:
+ ischanged = True
+ elif header[0] == "content-md5":
+ if header[1] != data["content-md5"][0]:
+ ischanged = True
+ except:
+ ischanged = True
+ if ischanged:
+ response = open_url("GET", url)
+ if response != None:
+ headers = response.getheaders()
+ feedhandle = response
+ else:
+ sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+ return
+ else:
+ return # don't need to do anything, nothing's changed.
+ else:
+ response = open_url("GET", url)
+ if response != None:
+ headers = response.getheaders()
+ feedhandle = response
+ else:
+ sys.stderr.write("Failed to fetch feed: %s\n" %(url))
+ return
+
+ fp = feedparser.parse(feedhandle)
db = dbm.open(os.path.join(statedir, "seen"), "c")
for item in fp["items"]:
# have we seen it before?
@@ -189,13 +700,33 @@ def parse_and_deliver(maildir, url, statedir):
if item.has_key("content"):
content = item["content"][0]["value"]
else:
- content = item["summary"]
+ if item.has_key("description"):
+ content = item["description"]
+ else:
+ content = u''
md5sum = md5.md5(content.encode("utf-8")).hexdigest()
- if db.has_key(url + "|" + item["link"]):
- data = db[url + "|" + item["link"]]
+ prevmessageid = None
+
+ db_guid_key = None
+ db_link_key = (url + u'|' + item["link"]).encode("utf-8")
+
+ # check if there's a guid too - if that exists and the md5 matches,
+ # skip this item
+ if item.has_key("guid"):
+ db_guid_key = (url + u'|' + item["guid"]).encode("utf-8")
+ if db.has_key(db_guid_key):
+ data = db[db_guid_key]
+ data = cgi.parse_qs(data)
+ if data["contentmd5"][0] == md5sum:
+ continue
+
+ if db.has_key(db_link_key):
+ data = db[db_link_key]
data = cgi.parse_qs(data)
+ if data.has_key("message-id"):
+ prevmessageid = data["message-id"][0]
if data["contentmd5"][0] == md5sum:
continue
@@ -206,27 +737,64 @@ def parse_and_deliver(maildir, url, statedir):
# create a basic email message
msg = MIMEMultipart("alternative")
- messageid = "<" + datetime.datetime.now().strftime("%Y%m%d%H%M") + "." + "".join([random.choice(string.ascii_letters + string.digits) for a in range(0,6)]) + "@" + socket.gethostname() + ">"
+ messageid = "<" \
+ + datetime.datetime.now().strftime("%Y%m%d%H%M") \
+ + "." \
+ + "".join( \
+ [random.choice( \
+ string.ascii_letters + string.digits \
+ ) for a in range(0,6) \
+ ]) + "@" + socket.gethostname() + ">"
msg.add_header("Message-ID", messageid)
msg.set_unixfrom("\"%s\" " %(url))
- msg.add_header("From", "\"%s\" " %(author))
- msg.add_header("To", "\"%s\" " %(url))
- createddate = datetime.datetime(*item["updated_parsed"][0:6]).strftime("%a, %e %b %Y %T -0000")
+ msg.add_header("From", "\"%s\" " %(author.encode("utf-8")))
+ msg.add_header("To", "\"%s\" " %(url.encode("utf-8")))
+ if prevmessageid:
+ msg.add_header("References", prevmessageid)
+ createddate = datetime.datetime.now() \
+ .strftime("%a, %e %b %Y %T -0000")
+ try:
+ createddate = datetime.datetime(*item["updated_parsed"][0:6]) \
+ .strftime("%a, %e %b %Y %T -0000")
+ except:
+ pass
msg.add_header("Date", createddate)
- msg.add_header("Subject", item["title"])
+ msg.add_header("X-rss2maildir-rundate", datetime.datetime.now() \
+ .strftime("%a, %e %b %Y %T -0000"))
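+ # escape bare angle brackets in the title, then flatten it with
+ # HTML2Text so the Subject header ends up as plain text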
+ subj_gen = HTML2Text()
+ title = item["title"]
+ title = re.sub(u'<', u'&lt;', title)
+ title = re.sub(u'>', u'&gt;', title)
+ subj_gen.feed(title.encode("utf-8"))
+ msg.add_header("Subject", subj_gen.gettext())
msg.set_default_type("text/plain")
- htmlpart = MIMEText(content.encode("utf-8"), "html", "utf-8")
+ htmlcontent = content.encode("utf-8")
+ htmlcontent = "%s\n\n<p>Item URL: <a href='%s'>%s</a></p>" %( \
+ content, \
+ item["link"], \
+ item["link"] )
+ htmlpart = MIMEText(htmlcontent.encode("utf-8"), "html", "utf-8")
textparser = HTML2Text()
textparser.feed(content.encode("utf-8"))
textcontent = textparser.gettext()
+ textcontent = "%s\n\nItem URL: %s" %( \
+ textcontent, \
+ item["link"] )
textpart = MIMEText(textcontent.encode("utf-8"), "plain", "utf-8")
msg.attach(textpart)
msg.attach(htmlpart)
# start by working out the filename we should be writing to; we do
# this following the normal maildir naming rules
- fname = str(os.getpid()) + "." + socket.gethostname() + "." + "".join([random.choice(string.ascii_letters + string.digits) for a in range(0,10)]) + "." + datetime.datetime.now().strftime('%s')
+ fname = str(os.getpid()) \
+ + "." + socket.gethostname() \
+ + "." + "".join( \
+ [random.choice( \
+ string.ascii_letters + string.digits \
+ ) for a in range(0,10) \
+ ]) + "." \
+ + datetime.datetime.now().strftime('%s')
fn = os.path.join(maildir, "tmp", fname)
fh = open(fn, "w")
fh.write(msg.as_string())
@@ -237,173 +805,225 @@ def parse_and_deliver(maildir, url, statedir):
os.unlink(fn)
# now add to the database about the item
- data = urllib.urlencode((("message-id", messageid), ("created", createddate), ("contentmd5", md5sum)))
- db[url + "|" + item["link"]] = data
+ if prevmessageid:
+ messageid = prevmessageid + " " + messageid
+ if item.has_key("guid") and item["guid"] != item["link"]:
+ data = urllib.urlencode(( \
+ ("message-id", messageid), \
+ ("created", createddate), \
+ ("contentmd5", md5sum) \
+ ))
+ db[db_guid_key] = data
+ try:
+ data = db[db_link_key]
+ data = cgi.parse_qs(data)
+ newdata = urllib.urlencode(( \
+ ("message-id", messageid), \
+ ("created", data["created"][0]), \
+ ("contentmd5", data["contentmd5"][0]) \
+ ))
+ db[db_link_key] = newdata
+ except:
+ db[db_link_key] = data
+ else:
+ data = urllib.urlencode(( \
+ ("message-id", messageid), \
+ ("created", createddate), \
+ ("contentmd5", md5sum) \
+ ))
+ db[db_link_key] = data
+
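+ # remember the cache-validation headers so the next run can use a
+ # cheap HEAD request to decide whether the feed has changed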
+ if headers:
+ data = []
+ for header in headers:
+ if header[0] in \
+ ["content-md5", "etag", "last-modified", "content-length"]:
+ data.append((header[0], header[1]))
+ if len(data) > 0:
+ data = urllib.urlencode(data)
+ feeddb[url] = data
db.close()
+ feeddb.close()
-# first off, parse the command line arguments
+if __name__ == "__main__":
+ # This only gets executed if the script is run directly
+ # first off, parse the command line arguments
-oparser = OptionParser()
-oparser.add_option(
- "-c", "--conf", dest="conf",
- help="location of config file"
- )
-oparser.add_option(
- "-s", "--statedir", dest="statedir",
- help="location of directory to store state in"
- )
+ oparser = OptionParser()
+ oparser.add_option(
+ "-c", "--conf", dest="conf",
+ help="location of config file"
+ )
+ oparser.add_option(
+ "-s", "--statedir", dest="statedir",
+ help="location of directory to store state in"
+ )
-(options, args) = oparser.parse_args()
+ (options, args) = oparser.parse_args()
-# check for the configfile
+ # check for the configfile
-configfile = None
+ configfile = None
-if options.conf != None:
- # does the file exist?
- try:
- os.stat(options.conf)
- configfile = options.conf
- except:
- # should exit here as the specified file doesn't exist
- sys.stderr.write("Config file %s does not exist. Exiting.\n" %(options.conf,))
- sys.exit(2)
-else:
- # check through the default locations
- try:
- os.stat("%s/.rss2maildir.conf" %(os.environ["HOME"],))
- configfile = "%s/.rss2maildir.conf" %(os.environ["HOME"],)
- except:
+ if options.conf != None:
+ # does the file exist?
try:
- os.stat("/etc/rss2maildir.conf")
- configfile = "/etc/rss2maildir.conf"
+ os.stat(options.conf)
+ configfile = options.conf
except:
- sys.stderr.write("No config file found. Exiting.\n")
+ # should exit here as the specified file doesn't exist
+ sys.stderr.write( \
+ "Config file %s does not exist. Exiting.\n" %(options.conf,))
sys.exit(2)
+ else:
+ # check through the default locations
+ try:
+ os.stat("%s/.rss2maildir.conf" %(os.environ["HOME"],))
+ configfile = "%s/.rss2maildir.conf" %(os.environ["HOME"],)
+ except:
+ try:
+ os.stat("/etc/rss2maildir.conf")
+ configfile = "/etc/rss2maildir.conf"
+ except:
+ sys.stderr.write("No config file found. Exiting.\n")
+ sys.exit(2)
-# Right - if we've got this far, we've got a config file, now for the hard
-# bits...
+ # Right - if we've got this far, we've got a config file, now for the hard
+ # bits...
-scp = SafeConfigParser()
-scp.read(configfile)
+ scp = SafeConfigParser()
+ scp.read(configfile)
-maildir_root = "RSSMaildir"
-state_dir = "state"
+ maildir_root = "RSSMaildir"
+ state_dir = "state"
-if options.statedir != None:
- state_dir = options.statedir
- try:
- mode = os.stat(state_dir)[stat.ST_MODE]
- if not stat.S_ISDIR(mode):
- sys.stderr.write("State directory (%s) is not a directory\n" %(state_dir))
- sys.exit(1)
- except:
- # try to make the directory
+ if options.statedir != None:
+ state_dir = options.statedir
try:
- os.mkdir(state_dir)
+ mode = os.stat(state_dir)[stat.ST_MODE]
+ if not stat.S_ISDIR(mode):
+ sys.stderr.write( \
+ "State directory (%s) is not a directory\n" %(state_dir))
+ sys.exit(1)
except:
- sys.stderr.write("Couldn't create statedir %s" %(state_dir))
- sys.exit(1)
-elif scp.has_option("general", "state_dir"):
- new_state_dir = scp.get("general", "state_dir")
- try:
- mode = os.stat(state_dir)[stat.ST_MODE]
- if not stat.S_ISDIR(mode):
- sys.stderr.write("State directory (%s) is not a directory\n" %(state_dir))
- sys.exit(1)
- except:
- # try to create it
+ # try to make the directory
+ try:
+ os.mkdir(state_dir)
+ except:
+ sys.stderr.write("Couldn't create statedir %s" %(state_dir))
+ sys.exit(1)
+ elif scp.has_option("general", "state_dir"):
+ new_state_dir = scp.get("general", "state_dir")
try:
- os.mkdir(new_state_dir)
- state_dir = new_state_dir
+ mode = os.stat(new_state_dir)[stat.ST_MODE]
+ if not stat.S_ISDIR(mode):
+ sys.stderr.write( \
+ "State directory (%s) is not a directory\n" %(state_dir))
+ sys.exit(1)
+ else:
+ state_dir = new_state_dir
except:
- sys.stderr.write("Couldn't create state directory %s\n" %(new_state_dir))
- sys.exit(1)
-else:
+ # try to create it
+ try:
+ os.mkdir(new_state_dir)
+ state_dir = new_state_dir
+ except:
+ sys.stderr.write( \
+ "Couldn't create state directory %s\n" %(new_state_dir))
+ sys.exit(1)
+ else:
+ try:
+ mode = os.stat(state_dir)[stat.ST_MODE]
+ if not stat.S_ISDIR(mode):
+ sys.stderr.write( \
+ "State directory %s is not a directory\n" %(state_dir))
+ sys.exit(1)
+ except:
+ try:
+ os.mkdir(state_dir)
+ except:
+ sys.stderr.write( \
+ "State directory %s could not be created\n" %(state_dir))
+ sys.exit(1)
+
+ if scp.has_option("general", "maildir_root"):
+ maildir_root = scp.get("general", "maildir_root")
+
try:
- mode = os.stat(state_dir)[stat.ST_MODE]
+ mode = os.stat(maildir_root)[stat.ST_MODE]
if not stat.S_ISDIR(mode):
- sys.stderr.write("State directory %s is not a directory\n" %(state_dir))
+ sys.stderr.write( \
+ "Maildir Root %s is not a directory\n" \
+ %(maildir_root))
sys.exit(1)
except:
try:
- os.mkdir(state_dir)
+ os.mkdir(maildir_root)
except:
- sys.stderr.write("State directory %s could not be created\n" %(state_dir))
+ sys.stderr.write("Couldn't create Maildir Root %s\n" \
+ %(maildir_root))
sys.exit(1)
-if scp.has_option("general", "maildir_root"):
- maildir_root = scp.get("general", "maildir_root")
-
-try:
- mode = os.stat(maildir_root)[stat.ST_MODE]
- if not stat.S_ISDIR(mode):
- sys.stderr.write("Maildir Root %s is not a directory\n" %(maildir_root))
- sys.exit(1)
-except:
+ feeds = scp.sections()
try:
- os.mkdir(maildir_root)
+ feeds.remove("general")
except:
- sys.stderr.write("Couldn't create Maildir Root %s\n" %(maildir_root))
- sys.exit(1)
-
-feeds = scp.sections()
-try:
- feeds.remove("general")
-except:
- pass
-
-for section in feeds:
- # check if the directory exists
- maildir = None
- try:
- maildir = scp.get(section, "maildir")
- except:
- maildir = section
+ pass
+
+ for section in feeds:
+ # check if the directory exists
+ maildir = None
+ try:
+ maildir = scp.get(section, "maildir")
+ except:
+ maildir = section
- maildir = urllib.urlencode(((section, maildir),)).split("=")[1]
- maildir = os.path.join(maildir_root, maildir)
+ maildir = urllib.urlencode(((section, maildir),)).split("=")[1]
+ maildir = os.path.join(maildir_root, maildir)
- try:
- exists = os.stat(maildir)
- if stat.S_ISDIR(exists[stat.ST_MODE]):
- # check if there's a new, cur and tmp directory
+ try:
+ exists = os.stat(maildir)
+ if stat.S_ISDIR(exists[stat.ST_MODE]):
+ # check if there's a new, cur and tmp directory
+ try:
+ mode = os.stat(os.path.join(maildir, "cur"))[stat.ST_MODE]
+ except:
+ os.mkdir(os.path.join(maildir, "cur"))
+ if not stat.S_ISDIR(mode):
+ sys.stderr.write("Broken maildir: %s\n" %(maildir))
+ try:
+ mode = os.stat(os.path.join(maildir, "tmp"))[stat.ST_MODE]
+ except:
+ os.mkdir(os.path.join(maildir, "tmp"))
+ if not stat.S_ISDIR(mode):
+ sys.stderr.write("Broken maildir: %s\n" %(maildir))
+ try:
+ mode = os.stat(os.path.join(maildir, "new"))[stat.ST_MODE]
+ if not stat.S_ISDIR(mode):
+ sys.stderr.write("Broken maildir: %s\n" %(maildir))
+ except:
+ os.mkdir(os.path.join(maildir, "new"))
+ else:
+ sys.stderr.write("Broken maildir: %s\n" %(maildir))
+ except:
try:
- mode = os.stat(os.path.join(maildir, "cur"))[stat.ST_MODE]
+ os.mkdir(maildir)
except:
- os.mkdir(os.path.join(maildir, "cur"))
- if not stat.S_ISDIR(mode):
- sys.stderr.write("Broken maildir: %s\n" %(maildir))
+ sys.stderr.write("Couldn't create root maildir %s\n" \
+ %(maildir))
+ sys.exit(1)
try:
- mode = os.stat(os.path.join(maildir, "tmp"))[stat.ST_MODE]
- except:
+ os.mkdir(os.path.join(maildir, "new"))
+ os.mkdir(os.path.join(maildir, "cur"))
os.mkdir(os.path.join(maildir, "tmp"))
- if not stat.S_ISDIR(mode):
- sys.stderr.write("Broken maildir: %s\n" %(maildir))
- try:
- mode = os.stat(os.path.join(maildir, "new"))[stat.ST_MODE]
- if not stat.S_ISDIR(mode):
- sys.stderr.write("Broken maildir: %s\n" %(maildir))
except:
- os.mkdir(os.path.join(maildir, "new"))
- else:
- sys.stderr.write("Broken maildir: %s\n" %(maildir))
- except:
- try:
- os.mkdir(maildir)
- except:
- sys.stderr.write("Couldn't create root maildir %s\n" %(maildir))
- sys.exit(1)
- try:
- os.mkdir(os.path.join(maildir, "new"))
- os.mkdir(os.path.join(maildir, "cur"))
- os.mkdir(os.path.join(maildir, "tmp"))
- except:
- sys.stderr.write("Couldn't create required maildir directories for %s\n" %(section,))
- sys.exit(1)
+ sys.stderr.write( \
+ "Couldn't create required maildir directories for %s\n" \
+ %(section,))
+ sys.exit(1)
- # right - we've got the directories, we've got the section, we know the
- # url... lets play!
+ # right - we've got the directories, we've got the section, we know the
+ # url... lets play!
- parse_and_deliver(maildir, section, state_dir)
+ parse_and_deliver(maildir, section, state_dir)