u'ul',
u'ol',
u'dl',
+ u'li',
+ u'dt',
+ u'dd',
u'div',
#u'blockquote',
]
self.ignorenodata = False
self.listcount = []
self.urls = []
+ self.images = {}
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
elif tag_name == u'a':
for attr in attrs:
if attr[0].lower() == u'href':
- self.urls.append(attr[1])
+ self.urls.append(attr[1].decode('utf-8'))
self.curdata = self.curdata + u'`'
self.opentags.append(tag_name)
return
url = u''
for attr in attrs:
if attr[0] == 'alt':
- alt = attr[1]
+ alt = attr[1].decode('utf-8')
elif attr[0] == 'src':
- url = attr[1]
+ url = attr[1].decode('utf-8')
if url:
- self.curdata = self.curdata \
- + u' [img:' \
- + unicode( \
- url.encode('utf-8'), \
- 'utf-8')
if alt:
- self.curdata = self.curdata \
- + u'(' \
- + unicode( \
- alt.encode('utf-8'), \
- 'utf-8') \
- + u')'
- self.curdata = self.curdata \
- + u']'
+ if self.images.has_key(alt):
+ if self.images[alt]["url"] == url:
+ self.curdata = self.curdata \
+ + u'|%s|' %(alt,)
+ else:
+ while self.images.has_key(alt):
+ alt = alt + "_"
+ self.images[alt] = {}
+ self.images[alt]["url"] = url
+ self.curdata = self.curdata \
+ + u'|%s|' %(alt,)
+ else:
+ self.images[alt] = {}
+ self.images[alt]["url"] = url
+ self.curdata = self.curdata \
+ + u'|%s|' %(alt,)
+ else:
+ if self.images.has_key(url):
+ self.curdata = self.curdata \
+ + u'|%s|' %(url,)
+ else:
+ self.images[url] = {}
+ self.images[url]["url"] = url
+ self.curdata = self.curdata \
+ + u'|%s|' %(url,)
def handle_curdata(self):
if self.ignorenodata:
newlinerequired = False
self.ignorenodata = False
- if newlinerequired \
- and len(self.text) > 2 \
- and self.text[-1] != u'\n' \
- and self.text[-2] != u'\n':
+ if newlinerequired:
+ if tag_thats_done in [u'dt', u'dd', u'li'] \
+ and len(self.text) > 1 \
+ and self.text[-1] != u'\n':
+ self.text = self.text + u'\n'
+ elif len(self.text) > 2 \
+ and self.text[-1] != u'\n' \
+ and self.text[-2] != u'\n':
self.text = self.text + u'\n\n'
if tag_thats_done in ["h1", "h2", "h3", "h4", "h5", "h6"]:
underline = u''
underlinechar = u'='
- headingtext = unicode( \
- self.curdata.encode("utf-8").strip(), "utf-8")
+ headingtext = " ".join(self.curdata.split())
seperator = u'\n' + u' '*self.indentlevel
headingtext = seperator.join( \
textwrap.wrap( \
underline = u' ' * self.indentlevel \
+ underlinechar * len(headingtext)
self.text = self.text \
- + headingtext.encode("utf-8") + u'\n' \
+ + headingtext + u'\n' \
+ underline
elif tag_thats_done in [u'p', u'div']:
paragraph = unicode( \
- self.curdata.strip().encode("utf-8"), "utf-8")
+ " ".join(self.curdata.strip().encode("utf-8").split()), \
+ "utf-8")
seperator = u'\n' + u' ' * self.indentlevel
self.text = self.text \
+ u' ' * self.indentlevel \
self.curdata.encode("utf-8"), "utf-8")
elif tag_thats_done == u'blockquote':
quote = unicode( \
- self.curdata.encode("utf-8").strip(), "utf-8")
+ " ".join(self.curdata.encode("utf-8").strip().split()), \
+ "utf-8")
seperator = u'\n' + u' ' * self.indentlevel + u'> '
if len(self.text) > 0 and self.text[-1] != u'\n':
self.text = self.text + u'\n'
)
self.curdata = u''
elif tag_thats_done == u'dt':
- definition = unicode(self.curdata.encode("utf-8").strip(), "utf-8")
+ definition = unicode(" ".join( \
+ self.curdata.encode("utf-8").strip().split()), \
+ "utf-8")
if len(self.text) > 0 and self.text[-1] != u'\n':
self.text = self.text + u'\n\n'
elif len(self.text) > 1 and self.text[-2] != u'\n':
self.textwidth - self.indentlevel - 1))
self.curdata = u''
elif tag_thats_done == u'dd':
- definition = unicode(self.curdata.encode("utf-8").strip(), "utf-8")
+ definition = unicode(" ".join( \
+ self.curdata.encode("utf-8").strip().split()),
+ "utf-8")
if len(definition) > 0:
if len(self.text) > 0 and self.text[-1] != u'\n':
self.text = self.text + u'\n'
def handle_data(self, data):
if len(self.opentags) == 0:
self.opentags.append(u'p')
- self.curdata = self.curdata + unicode(data, "utf-8")
+ self.curdata = self.curdata + data.decode("utf-8")
def handle_entityref(self, name):
entity = name
if len(self.urls) > 0:
self.text = self.text + u'\n__ ' + u'\n__ '.join(self.urls) + u'\n'
self.urls = []
+ if len(self.images.keys()) > 0:
+ self.text = self.text + u'\n.. ' \
+ + u'.. '.join( \
+ ["|%s| image:: %s" %(a, self.images[a]["url"]) \
+ for a in self.images.keys()]) + u'\n'
+ self.images = {}
return self.text
def open_url(method, url):
elif scp.has_option("general", "state_dir"):
new_state_dir = scp.get("general", "state_dir")
try:
- mode = os.stat(state_dir)[stat.ST_MODE]
+ mode = os.stat(new_state_dir)[stat.ST_MODE]
if not stat.S_ISDIR(mode):
sys.stderr.write( \
- "State directory (%s) is not a directory\n" %(state_dir))
+ "State directory (%s) is not a directory\n" %(new_state_dir))
sys.exit(1)
+ else:
+ state_dir = new_state_dir
except:
# try to create it
try: