commit 624bee1001abcd5d16c9dc4370f566718891f8d1
parent ded2937411793278c2a9cd87fa79c96280be50ae
Author: Dan Callaghan <djc@djc.id.au>
Date: Wed, 17 Sep 2008 19:18:00 +1000
replaced dodgy markdown with markdown2 from http://pypi.python.org/packages/source/m/markdown2/markdown2-1.0.1.9.tar.gz
Diffstat:
6 files changed, 1856 insertions(+), 2103 deletions(-)
diff --git a/TODO b/TODO
@@ -11,7 +11,6 @@
- entire templates
...
- pings, e.g. http://www.technorati.com/developers/ping/
-- http://www.pell.portland.or.us/~orc/Code/markdown/ or http://github.com/jgm/peg-markdown/tree/master
- cycling log
- initially link to Nokia
- eventually write own GPS tracker ...
@@ -26,3 +25,5 @@
- handle reCAPTCHA errors (including no captcha fields submitted!!!)
- invalid offsets (displays every entry at max and 500's on invalid such as alpha)
- config option to add next/prev links to page (as well as the link rels)
+- markdown typography/smartypants
+ - or even just, better markdown
diff --git a/blog.py b/blog.py
@@ -1,6 +1,6 @@
import os, re, uuid, email
from datetime import datetime
-import markdown
+from markdown2 import Markdown
import genshi
import yaml
@@ -94,7 +94,7 @@ class BlogEntry(object):
msg = email.message_from_file(open(os.path.join(self.dir, 'content.txt'), 'r'))
self.metadata = cleanup_metadata(msg.items())
self.raw_body = msg.get_payload().decode('utf8') # XXX encoding
- md = markdown.Markdown(extensions=['typography'])
+ md = Markdown(extras=['code-friendly'])
self.body = genshi.Markup(md.convert(self.raw_body))
self.title = self.metadata['title']
@@ -171,7 +171,7 @@ class Comment(object):
msg = email.message_from_file(open(path, 'r'))
self.metadata = cleanup_metadata(msg.items())
self.raw_body = msg.get_payload().decode('utf8') # XXX encoding
- md = markdown.Markdown(extensions=['typography'], safe_mode='escape')
+ md = Markdown(extras=['code-friendly'], safe_mode='escape')
self.body = genshi.Markup(md.convert(self.raw_body))
self.author = self.metadata.get('from', None)
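
The calling convention barely changes across the swap. A minimal sketch of both APIs as used in blog.py above (the sample input is made up; safe_mode='escape' mirrors the comment-rendering path):

    # old: python-markdown 1.7 (lib/markdown.py, deleted below)
    import markdown
    md = markdown.Markdown(extensions=['typography'], safe_mode='escape')
    html = md.convert(u'*hello*')

    # new: markdown2 1.0.1.9 (lib/markdown2.py, added below)
    from markdown2 import Markdown
    md = Markdown(extras=['code-friendly'], safe_mode='escape')
    html = md.convert(u'*hello*')  # -> u'<p><em>hello</em></p>\n'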
diff --git a/lib/markdown.py b/lib/markdown.py
@@ -1,1929 +0,0 @@
-#!/usr/bin/env python
-
-version = "1.7"
-version_info = (1,7,0,"rc-2")
-__revision__ = "$Rev: 72 $"
-
-"""
-Python-Markdown
-===============
-
-Converts Markdown to HTML. Basic usage as a module:
-
- import markdown
- md = Markdown()
- html = md.convert(your_text_string)
-
-See http://www.freewisdom.org/projects/python-markdown/ for more
-information and instructions on how to extend the functionality of the
-script. (You might want to read that before you try modifying this
-file.)
-
-Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
-maintained by [Yuri Takhteyev](http://www.freewisdom.org) and [Waylan
-Limberg](http://achinghead.com/).
-
-Contact: yuri [at] freewisdom.org
- waylan [at] gmail.com
-
-License: GPL 2 (http://www.gnu.org/copyleft/gpl.html) or BSD
-
-"""
-
-
-import re, sys, codecs
-
-from logging import getLogger, StreamHandler, Formatter, \
- DEBUG, INFO, WARN, ERROR, CRITICAL
-
-
-MESSAGE_THRESHOLD = CRITICAL
-
-
-# Configure debug message logger (the hard way - to support python 2.3)
-logger = getLogger('MARKDOWN')
-logger.setLevel(DEBUG) # This is restricted by handlers later
-console_hndlr = StreamHandler()
-formatter = Formatter('%(name)s-%(levelname)s: "%(message)s"')
-console_hndlr.setFormatter(formatter)
-console_hndlr.setLevel(MESSAGE_THRESHOLD)
-logger.addHandler(console_hndlr)
-
-
-def message(level, text):
- ''' A wrapper method for logging debug messages. '''
- logger.log(level, text)
-
-
-# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------
-
-TAB_LENGTH = 4 # expand tabs to this many spaces
-ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz">
-SMART_EMPHASIS = 1 # this_or_that does not become this<i>or</i>that
-HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
-
-RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
- # from Hebrew to Nko (includes Arabic, Syriac and Thaana)
- (u'\u2D30', u'\u2D7F'),
- # Tifinagh
- )
-
-# Unicode Reference Table:
-# 0590-05FF - Hebrew
-# 0600-06FF - Arabic
-# 0700-074F - Syriac
-# 0750-077F - Arabic Supplement
-# 0780-07BF - Thaana
-# 07C0-07FF - Nko
-
-BOMS = { 'utf-8': (codecs.BOM_UTF8, ),
- 'utf-16': (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE),
- #'utf-32': (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)
- }
-
-def removeBOM(text, encoding):
- convert = isinstance(text, unicode)
- for bom in BOMS[encoding]:
- bom = convert and bom.decode(encoding) or bom
- if text.startswith(bom):
- return text.lstrip(bom)
- return text
-
-# The following constant specifies the name used in the usage
-# statement displayed for python versions lower than 2.3. (With
-# python2.3 and higher the usage statement is generated by optparse
-# and uses the actual name of the executable called.)
-
-EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"
-
-
-# --------------- CONSTANTS YOU _SHOULD NOT_ HAVE TO CHANGE ----------
-
-# a template for html placeholders
-HTML_PLACEHOLDER_PREFIX = "qaodmasdkwaspemas"
-HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%dajkqlsmdqpakldnzsdfls"
-
-BLOCK_LEVEL_ELEMENTS = ['p', 'div', 'blockquote', 'pre', 'table',
- 'dl', 'ol', 'ul', 'script', 'noscript',
- 'form', 'fieldset', 'iframe', 'math', 'ins',
- 'del', 'hr', 'hr/', 'style']
-
-def isBlockLevel (tag):
- return ( (tag in BLOCK_LEVEL_ELEMENTS) or
- (tag[0] == 'h' and tag[1] in "0123456789") )
-
-"""
-======================================================================
-========================== NANODOM ===================================
-======================================================================
-
-The three classes below implement some of the most basic DOM
-methods. I use this instead of minidom because I need a simpler
-functionality and do not want to require additional libraries.
-
-Importantly, NanoDom does not do normalization, which is what we
-want. It also adds extra white space when converting DOM to string
-"""
-
-ENTITY_NORMALIZATION_EXPRESSIONS = [ (re.compile("&"), "&amp;"),
- (re.compile("<"), "&lt;"),
- (re.compile(">"), "&gt;")]
-
-ENTITY_NORMALIZATION_EXPRESSIONS_SOFT = [ (re.compile("&(?!\#)"), "&amp;"),
- (re.compile("<"), "&lt;"),
- (re.compile(">"), "&gt;"),
- (re.compile("\""), "&quot;")]
-
-
-def getBidiType(text):
-
- if not text: return None
-
- ch = text[0]
-
- if not isinstance(ch, unicode) or not ch.isalpha():
- return None
-
- else:
-
- for min, max in RTL_BIDI_RANGES:
- if ( ch >= min and ch <= max ):
- return "rtl"
- else:
- return "ltr"
-
-
-class Document:
-
- def __init__ (self):
- self.bidi = "ltr"
-
- def appendChild(self, child):
- self.documentElement = child
- child.isDocumentElement = True
- child.parent = self
- self.entities = {}
-
- def setBidi(self, bidi):
- if bidi:
- self.bidi = bidi
-
- def createElement(self, tag, textNode=None):
- el = Element(tag)
- el.doc = self
- if textNode:
- el.appendChild(self.createTextNode(textNode))
- return el
-
- def createTextNode(self, text):
- node = TextNode(text)
- node.doc = self
- return node
-
- def createEntityReference(self, entity):
- if entity not in self.entities:
- self.entities[entity] = EntityReference(entity)
- return self.entities[entity]
-
- def createCDATA(self, text):
- node = CDATA(text)
- node.doc = self
- return node
-
- def toxml (self):
- return self.documentElement.toxml()
-
- def normalizeEntities(self, text, avoidDoubleNormalizing=False):
-
- if avoidDoubleNormalizing:
- regexps = ENTITY_NORMALIZATION_EXPRESSIONS_SOFT
- else:
- regexps = ENTITY_NORMALIZATION_EXPRESSIONS
-
- for regexp, substitution in regexps:
- text = regexp.sub(substitution, text)
- return text
-
- def find(self, test):
- return self.documentElement.find(test)
-
- def unlink(self):
- self.documentElement.unlink()
- self.documentElement = None
-
-
-class CDATA:
-
- type = "cdata"
-
- def __init__ (self, text):
- self.text = text
-
- def handleAttributes(self):
- pass
-
- def toxml (self):
- return "<![CDATA[" + self.text + "]]>"
-
-class Element:
-
- type = "element"
-
- def __init__ (self, tag):
-
- self.nodeName = tag
- self.attributes = []
- self.attribute_values = {}
- self.childNodes = []
- self.bidi = None
- self.isDocumentElement = False
-
- def setBidi(self, bidi):
-
- if bidi:
-
- orig_bidi = self.bidi
-
- if not self.bidi or self.isDocumentElement:
- # Once the bidi is set don't change it (except for doc element)
- self.bidi = bidi
- self.parent.setBidi(bidi)
-
-
- def unlink(self):
- for child in self.childNodes:
- if child.type == "element":
- child.unlink()
- self.childNodes = None
-
- def setAttribute(self, attr, value):
- if not attr in self.attributes:
- self.attributes.append(attr)
-
- self.attribute_values[attr] = value
-
- def insertChild(self, position, child):
- self.childNodes.insert(position, child)
- child.parent = self
-
- def removeChild(self, child):
- self.childNodes.remove(child)
-
- def replaceChild(self, oldChild, newChild):
- position = self.childNodes.index(oldChild)
- self.removeChild(oldChild)
- self.insertChild(position, newChild)
-
- def appendChild(self, child):
- self.childNodes.append(child)
- child.parent = self
-
- def handleAttributes(self):
- pass
-
- def find(self, test, depth=0):
- """ Returns a list of descendants that pass the test function """
- matched_nodes = []
- for child in self.childNodes:
- if test(child):
- matched_nodes.append(child)
- if child.type == "element":
- matched_nodes += child.find(test, depth+1)
- return matched_nodes
-
- def toxml(self):
- if ENABLE_ATTRIBUTES:
- for child in self.childNodes:
- child.handleAttributes()
-
- buffer = ""
- if self.nodeName in ['h1', 'h2', 'h3', 'h4']:
- buffer += "\n"
- elif self.nodeName in ['li']:
- buffer += "\n "
-
- # Process children FIRST, then do the attributes
-
- childBuffer = ""
-
- if self.childNodes or self.nodeName in ['blockquote']:
- childBuffer += ">"
- for child in self.childNodes:
- childBuffer += child.toxml()
- if self.nodeName == 'p':
- childBuffer += "\n"
- elif self.nodeName == 'li':
- childBuffer += "\n "
- childBuffer += "</%s>" % self.nodeName
- else:
- childBuffer += "/>"
-
-
-
- buffer += "<" + self.nodeName
-
- if self.nodeName in ['p', 'li', 'ul', 'ol',
- 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
-
- if not self.attribute_values.has_key("dir"):
- if self.bidi:
- bidi = self.bidi
- else:
- bidi = self.doc.bidi
-
- if bidi=="rtl":
- self.setAttribute("dir", "rtl")
-
- for attr in self.attributes:
- value = self.attribute_values[attr]
- value = self.doc.normalizeEntities(value,
- avoidDoubleNormalizing=True)
- buffer += ' %s="%s"' % (attr, value)
-
-
- # Now let's actually append the children
-
- buffer += childBuffer
-
- if self.nodeName in ['p', 'br ', 'li', 'ul', 'ol',
- 'h1', 'h2', 'h3', 'h4'] :
- buffer += "\n"
-
- return buffer
-
-
-class TextNode:
-
- type = "text"
- attrRegExp = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123}
-
- def __init__ (self, text):
- self.value = text
-
- def attributeCallback(self, match):
-
- self.parent.setAttribute(match.group(1), match.group(2))
-
- def handleAttributes(self):
- self.value = self.attrRegExp.sub(self.attributeCallback, self.value)
-
- def toxml(self):
-
- text = self.value
-
- self.parent.setBidi(getBidiType(text))
-
- if not text.startswith(HTML_PLACEHOLDER_PREFIX):
- if self.parent.nodeName == "p":
- text = text.replace("\n", "\n ")
- elif (self.parent.nodeName == "li"
- and self.parent.childNodes[0]==self):
- text = "\n " + text.replace("\n", "\n ")
- text = self.doc.normalizeEntities(text)
- return text
-
-
-class EntityReference:
-
- type = "entity_ref"
-
- def __init__(self, entity):
- self.entity = entity
-
- def handleAttributes(self):
- pass
-
- def toxml(self):
- return "&" + self.entity + ";"
-
-
-"""
-======================================================================
-========================== PRE-PROCESSORS ============================
-======================================================================
-
-Preprocessors munge source text before we start doing anything too
-complicated.
-
-There are two types of preprocessors: TextPreprocessor and Preprocessor.
-
-"""
-
-
-class TextPreprocessor:
- '''
- TextPreprocessors are run before the text is broken into lines.
-
- Each TextPreprocessor implements a "run" method that takes a pointer to a
- text string of the document, modifies it as necessary and returns
- either the same pointer or a pointer to a new string.
-
- TextPreprocessors must extend markdown.TextPreprocessor.
- '''
-
- def run(self, text):
- pass
-
-
-class Preprocessor:
- '''
- Preprocessors are run after the text is broken into lines.
-
- Each preprocessor implements a "run" method that takes a pointer to a
- list of lines of the document, modifies it as necessary and returns
- either the same pointer or a pointer to a new list.
-
- Preprocessors must extend markdown.Preprocessor.
- '''
-
- def run(self, lines):
- pass
-
-
-class HtmlBlockPreprocessor(TextPreprocessor):
- """Removes html blocks from the source text and stores it."""
-
- def _get_left_tag(self, block):
- return block[1:].replace(">", " ", 1).split()[0].lower()
-
-
- def _get_right_tag(self, left_tag, block):
- return block.rstrip()[-len(left_tag)-2:-1].lower()
-
- def _equal_tags(self, left_tag, right_tag):
-
- if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
- return True
- if ("/" + left_tag) == right_tag:
- return True
- if (right_tag == "--" and left_tag == "--"):
- return True
- elif left_tag == right_tag[1:] \
- and right_tag[0] != "<":
- return True
- else:
- return False
-
- def _is_oneliner(self, tag):
- return (tag in ['hr', 'hr/'])
-
-
- def run(self, text):
-
- new_blocks = []
- text = text.split("\n\n")
-
- items = []
- left_tag = ''
- right_tag = ''
- in_tag = False # flag
-
- for block in text:
- if block.startswith("\n"):
- block = block[1:]
-
- if not in_tag:
-
- if block.startswith("<"):
-
- left_tag = self._get_left_tag(block)
- right_tag = self._get_right_tag(left_tag, block)
-
- if not (isBlockLevel(left_tag) \
- or block[1] in ["!", "?", "@", "%"]):
- new_blocks.append(block)
- continue
-
- if self._is_oneliner(left_tag):
- new_blocks.append(block.strip())
- continue
-
- if block[1] == "!":
- # is a comment block
- left_tag = "--"
- right_tag = self._get_right_tag(left_tag, block)
- # keep checking conditions below and maybe just append
-
- if block.rstrip().endswith(">") \
- and self._equal_tags(left_tag, right_tag):
- new_blocks.append(
- self.stash.store(block.strip()))
- continue
- else: #if not block[1] == "!":
- # if is block level tag and is not complete
- items.append(block.strip())
- in_tag = True
- continue
-
- new_blocks.append(block)
-
- else:
- items.append(block.strip())
-
- right_tag = self._get_right_tag(left_tag, block)
-
- if self._equal_tags(left_tag, right_tag):
- # if find closing tag
- in_tag = False
- new_blocks.append(
- self.stash.store('\n\n'.join(items)))
- items = []
-
- if items:
- new_blocks.append(self.stash.store('\n\n'.join(items)))
- new_blocks.append('\n')
-
- return "\n\n".join(new_blocks)
-
-HTML_BLOCK_PREPROCESSOR = HtmlBlockPreprocessor()
-
-
-class HeaderPreprocessor(Preprocessor):
-
- """
- Replaces underlined headers with hashed headers to avoid
- the need for lookahead later.
- """
-
- def run (self, lines):
-
- i = -1
- while i+1 < len(lines):
- i = i+1
- if not lines[i].strip():
- continue
-
- if lines[i].startswith("#"):
- lines.insert(i+1, "\n")
-
- if (i+1 <= len(lines)
- and lines[i+1]
- and lines[i+1][0] in ['-', '=']):
-
- underline = lines[i+1].strip()
-
- if underline == "="*len(underline):
- lines[i] = "# " + lines[i].strip()
- lines[i+1] = ""
- elif underline == "-"*len(underline):
- lines[i] = "## " + lines[i].strip()
- lines[i+1] = ""
-
- return lines
-
-HEADER_PREPROCESSOR = HeaderPreprocessor()
-
-
-class LinePreprocessor(Preprocessor):
- """Deals with HR lines (needs to be done before processing lists)"""
-
- blockquote_re = re.compile(r'^(> )+')
-
- def run (self, lines):
- for i in range(len(lines)):
- prefix = ''
- m = self.blockquote_re.search(lines[i])
- if m : prefix = m.group(0)
- if self._isLine(lines[i][len(prefix):]):
- lines[i] = prefix + self.stash.store("<hr />", safe=True)
- return lines
-
- def _isLine(self, block):
- """Determines if a block should be replaced with an <HR>"""
- if block.startswith(" "): return 0 # a code block
- text = "".join([x for x in block if not x.isspace()])
- if len(text) <= 2:
- return 0
- for pattern in ['isline1', 'isline2', 'isline3']:
- m = RE.regExp[pattern].match(text)
- if (m and m.group(1)):
- return 1
- else:
- return 0
-
-LINE_PREPROCESSOR = LinePreprocessor()
-
-
-class ReferencePreprocessor(Preprocessor):
- '''
- Removes reference definitions from the text and stores them for later use.
- '''
-
- def run (self, lines):
-
- new_text = [];
- for line in lines:
- m = RE.regExp['reference-def'].match(line)
- if m:
- id = m.group(2).strip().lower()
- t = m.group(4).strip() # potential title
- if not t:
- self.references[id] = (m.group(3), t)
- elif (len(t) >= 2
- and (t[0] == t[-1] == "\""
- or t[0] == t[-1] == "\'"
- or (t[0] == "(" and t[-1] == ")") ) ):
- self.references[id] = (m.group(3), t[1:-1])
- else:
- new_text.append(line)
- else:
- new_text.append(line)
-
- return new_text #+ "\n"
-
-REFERENCE_PREPROCESSOR = ReferencePreprocessor()
-
-"""
-======================================================================
-========================== INLINE PATTERNS ===========================
-======================================================================
-
-Inline patterns such as *emphasis* are handled by means of auxiliary
-objects, one per pattern. Pattern objects must be instances of classes
-that extend markdown.Pattern. Each pattern object uses a single regular
-expression and needs to support the following methods:
-
- pattern.getCompiledRegExp() - returns a regular expression
-
- pattern.handleMatch(m, doc) - takes a match object and returns
- a NanoDom node (as a part of the provided
- doc) or None
-
-All of python markdown's built-in patterns subclass from Pattern,
-but you can add additional patterns that don't.
-
-Also note that all the regular expressions used by inline patterns must
-capture the whole block. For this reason, they all start with
-'^(.*)' and end with '(.*)$'. For the built-in expressions,
-Pattern takes care of adding the "^(.*)" and "(.*)$".
-
-Finally, the order in which regular expressions are applied is very
-important - e.g. if we first replace http://.../ links with <a> tags
-and _then_ try to replace inline html, we would end up with a mess.
-So, we apply the expressions in the following order:
-
- * escape and backticks have to go before everything else, so
- that we can preempt any markdown patterns by escaping them.
-
- * then we handle auto-links (must be done before inline html)
-
- * then we handle inline HTML. At this point we will simply
- replace all inline HTML strings with a placeholder and add
- the actual HTML to a hash.
-
- * then inline images (must be done before links)
-
- * then bracketed links, first regular then reference-style
-
- * finally we apply strong and emphasis
-"""
-
-NOBRACKET = r'[^\]\[]*'
-BRK = ( r'\[('
- + (NOBRACKET + r'(\[')*6
- + (NOBRACKET+ r'\])*')*6
- + NOBRACKET + r')\]' )
-NOIMG = r'(?<!\!)'
-
-BACKTICK_RE = r'\`([^\`]*)\`' # `e= m*c^2`
-DOUBLE_BACKTICK_RE = r'\`\`(.*)\`\`' # ``e=f("`")``
-ESCAPE_RE = r'\\(.)' # \<
-EMPHASIS_RE = r'\*([^\*]*)\*' # *emphasis*
-STRONG_RE = r'\*\*(.*)\*\*' # **strong**
-STRONG_EM_RE = r'\*\*\*([^_]*)\*\*\*' # ***strong***
-
-if SMART_EMPHASIS:
- EMPHASIS_2_RE = r'(?<!\S)_(\S[^_]*)_' # _emphasis_
-else:
- EMPHASIS_2_RE = r'_([^_]*)_' # _emphasis_
-
-STRONG_2_RE = r'__([^_]*)__' # __strong__
-STRONG_EM_2_RE = r'___([^_]*)___' # ___strong___
-
-LINK_RE = NOIMG + BRK + r'\s*\(([^\)]*)\)' # [text](url)
-LINK_ANGLED_RE = NOIMG + BRK + r'\s*\(<([^\)]*)>\)' # [text](<url>)
-IMAGE_LINK_RE = r'\!' + BRK + r'\s*\(([^\)]*)\)' # ![alttxt](http://x.com/)
-REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
-IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
-NOT_STRONG_RE = r'( \* )' # stand-alone * or _
-AUTOLINK_RE = r'<(http://[^>]*)>' # <http://www.123.com>
-AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
-#HTML_RE = r'(\<[^\>]*\>)' # <...>
-HTML_RE = r'(\<[a-zA-Z/][^\>]*\>)' # <...>
-ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &amp;
-LINE_BREAK_RE = r' \n' # two spaces at end of line
-LINE_BREAK_2_RE = r' $' # two spaces at end of text
-
-class Pattern:
-
- def __init__ (self, pattern):
- self.pattern = pattern
- self.compiled_re = re.compile("^(.*)%s(.*)$" % pattern, re.DOTALL)
-
- def getCompiledRegExp (self):
- return self.compiled_re
-
-BasePattern = Pattern # for backward compatibility
-
-class SimpleTextPattern (Pattern):
-
- def handleMatch(self, m, doc):
- return doc.createTextNode(m.group(2))
-
-class SimpleTagPattern (Pattern):
-
- def __init__ (self, pattern, tag):
- Pattern.__init__(self, pattern)
- self.tag = tag
-
- def handleMatch(self, m, doc):
- el = doc.createElement(self.tag)
- el.appendChild(doc.createTextNode(m.group(2)))
- return el
-
-class SubstituteTagPattern (SimpleTagPattern):
-
- def handleMatch (self, m, doc):
- return doc.createElement(self.tag)
-
-class BacktickPattern (Pattern):
-
- def __init__ (self, pattern):
- Pattern.__init__(self, pattern)
- self.tag = "code"
-
- def handleMatch(self, m, doc):
- el = doc.createElement(self.tag)
- text = m.group(2).strip()
- #text = text.replace("&", "&amp;")
- el.appendChild(doc.createTextNode(text))
- return el
-
-
-class DoubleTagPattern (SimpleTagPattern):
-
- def handleMatch(self, m, doc):
- tag1, tag2 = self.tag.split(",")
- el1 = doc.createElement(tag1)
- el2 = doc.createElement(tag2)
- el1.appendChild(el2)
- el2.appendChild(doc.createTextNode(m.group(2)))
- return el1
-
-
-class HtmlPattern (Pattern):
-
- def handleMatch (self, m, doc):
- rawhtml = m.group(2)
- inline = True
- place_holder = self.stash.store(rawhtml)
- return doc.createTextNode(place_holder)
-
-
-class LinkPattern (Pattern):
-
- def handleMatch(self, m, doc):
- el = doc.createElement('a')
- el.appendChild(doc.createTextNode(m.group(2)))
- parts = m.group(9).split('"')
- # We should now have [], [href], or [href, title]
- if parts:
- el.setAttribute('href', parts[0].strip())
- else:
- el.setAttribute('href', "")
- if len(parts) > 1:
- # we also got a title
- title = '"' + '"'.join(parts[1:]).strip()
- title = dequote(title) #.replace('"', "&quot;")
- el.setAttribute('title', title)
- return el
-
-
-class ImagePattern (Pattern):
-
- def handleMatch(self, m, doc):
- el = doc.createElement('img')
- src_parts = m.group(9).split()
- if src_parts:
- el.setAttribute('src', src_parts[0])
- else:
- el.setAttribute('src', "")
- if len(src_parts) > 1:
- el.setAttribute('title', dequote(" ".join(src_parts[1:])))
- if ENABLE_ATTRIBUTES:
- text = doc.createTextNode(m.group(2))
- el.appendChild(text)
- text.handleAttributes()
- truealt = text.value
- el.childNodes.remove(text)
- else:
- truealt = m.group(2)
- el.setAttribute('alt', truealt)
- return el
-
-class ReferencePattern (Pattern):
-
- def handleMatch(self, m, doc):
-
- if m.group(9):
- id = m.group(9).lower()
- else:
- # if we got something like "[Google][]"
- # we'll use "google" as the id
- id = m.group(2).lower()
-
- if not self.references.has_key(id): # ignore undefined refs
- return None
- href, title = self.references[id]
- text = m.group(2)
- return self.makeTag(href, title, text, doc)
-
- def makeTag(self, href, title, text, doc):
- el = doc.createElement('a')
- el.setAttribute('href', href)
- if title:
- el.setAttribute('title', title)
- el.appendChild(doc.createTextNode(text))
- return el
-
-
-class ImageReferencePattern (ReferencePattern):
-
- def makeTag(self, href, title, text, doc):
- el = doc.createElement('img')
- el.setAttribute('src', href)
- if title:
- el.setAttribute('title', title)
- el.setAttribute('alt', text)
- return el
-
-
-class AutolinkPattern (Pattern):
-
- def handleMatch(self, m, doc):
- el = doc.createElement('a')
- el.setAttribute('href', m.group(2))
- el.appendChild(doc.createTextNode(m.group(2)))
- return el
-
-class AutomailPattern (Pattern):
-
- def handleMatch(self, m, doc):
- el = doc.createElement('a')
- email = m.group(2)
- if email.startswith("mailto:"):
- email = email[len("mailto:"):]
- for letter in email:
- entity = doc.createEntityReference("#%d" % ord(letter))
- el.appendChild(entity)
- mailto = "mailto:" + email
- mailto = "".join(['&#%d;' % ord(letter) for letter in mailto])
- el.setAttribute('href', mailto)
- return el
-
-ESCAPE_PATTERN = SimpleTextPattern(ESCAPE_RE)
-NOT_STRONG_PATTERN = SimpleTextPattern(NOT_STRONG_RE)
-
-BACKTICK_PATTERN = BacktickPattern(BACKTICK_RE)
-DOUBLE_BACKTICK_PATTERN = BacktickPattern(DOUBLE_BACKTICK_RE)
-STRONG_PATTERN = SimpleTagPattern(STRONG_RE, 'strong')
-STRONG_PATTERN_2 = SimpleTagPattern(STRONG_2_RE, 'strong')
-EMPHASIS_PATTERN = SimpleTagPattern(EMPHASIS_RE, 'em')
-EMPHASIS_PATTERN_2 = SimpleTagPattern(EMPHASIS_2_RE, 'em')
-
-STRONG_EM_PATTERN = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
-STRONG_EM_PATTERN_2 = DoubleTagPattern(STRONG_EM_2_RE, 'strong,em')
-
-LINE_BREAK_PATTERN = SubstituteTagPattern(LINE_BREAK_RE, 'br ')
-LINE_BREAK_PATTERN_2 = SubstituteTagPattern(LINE_BREAK_2_RE, 'br ')
-
-LINK_PATTERN = LinkPattern(LINK_RE)
-LINK_ANGLED_PATTERN = LinkPattern(LINK_ANGLED_RE)
-IMAGE_LINK_PATTERN = ImagePattern(IMAGE_LINK_RE)
-IMAGE_REFERENCE_PATTERN = ImageReferencePattern(IMAGE_REFERENCE_RE)
-REFERENCE_PATTERN = ReferencePattern(REFERENCE_RE)
-
-HTML_PATTERN = HtmlPattern(HTML_RE)
-ENTITY_PATTERN = HtmlPattern(ENTITY_RE)
-
-AUTOLINK_PATTERN = AutolinkPattern(AUTOLINK_RE)
-AUTOMAIL_PATTERN = AutomailPattern(AUTOMAIL_RE)
-
-
-"""
-======================================================================
-========================== POST-PROCESSORS ===========================
-======================================================================
-
-Markdown also allows post-processors, which are similar to
-preprocessors in that they need to implement a "run" method. However,
-they are run after core processing.
-
-There are two types of post-processors: Postprocessor and TextPostprocessor
-"""
-
-
-class Postprocessor:
- '''
- Postprocessors are run before the dom is converted back into text.
-
- Each Postprocessor implements a "run" method that takes a pointer to a
- NanoDom document, modifies it as necessary and returns a NanoDom
- document.
-
- Postprocessors must extend markdown.Postprocessor.
-
- There are currently no standard post-processors, but the footnote
- extension uses one.
- '''
-
- def run(self, dom):
- pass
-
-
-
-class TextPostprocessor:
- '''
- TextPostprocessors are run after the dom is converted back into text.
-
- Each TextPostprocessor implements a "run" method that takes a pointer to a
- text string, modifies it as necessary and returns a text string.
-
- TextPostprocessors must extend markdown.TextPostprocessor.
- '''
-
- def run(self, text):
- pass
-
-
-class RawHtmlTextPostprocessor(TextPostprocessor):
-
- def __init__(self):
- pass
-
- def run(self, text):
- for i in range(self.stash.html_counter):
- html, safe = self.stash.rawHtmlBlocks[i]
- if self.safeMode and not safe:
- if str(self.safeMode).lower() == 'escape':
- html = self.escape(html)
- elif str(self.safeMode).lower() == 'remove':
- html = ''
- else:
- html = HTML_REMOVED_TEXT
-
- text = text.replace("<p>%s\n</p>" % (HTML_PLACEHOLDER % i),
- html + "\n")
- text = text.replace(HTML_PLACEHOLDER % i, html)
- return text
-
- def escape(self, html):
- ''' Basic html escaping '''
- html = html.replace('&', '&amp;')
- html = html.replace('<', '&lt;')
- html = html.replace('>', '&gt;')
- return html.replace('"', '&quot;')
-
-RAWHTMLTEXTPOSTPROCESSOR = RawHtmlTextPostprocessor()
-
-"""
-======================================================================
-========================== MISC AUXILIARY CLASSES ====================
-======================================================================
-"""
-
-class HtmlStash:
- """This class is used for stashing HTML objects that we extract
- in the beginning and replace with place-holders."""
-
- def __init__ (self):
- self.html_counter = 0 # for counting inline html segments
- self.rawHtmlBlocks=[]
-
- def store(self, html, safe=False):
- """Saves an HTML segment for later reinsertion. Returns a
- placeholder string that needs to be inserted into the
- document.
-
- @param html: an html segment
- @param safe: label an html segment as safe for safemode
- @param inline: label a segment as inline html
- @returns: a placeholder string """
- self.rawHtmlBlocks.append((html, safe))
- placeholder = HTML_PLACEHOLDER % self.html_counter
- self.html_counter += 1
- return placeholder
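
A short usage sketch, grounded in the placeholder template defined near the top of the file:

    stash = HtmlStash()
    ph = stash.store('<script>alert(1)</script>')
    # ph == 'qaodmasdkwaspemas0ajkqlsmdqpakldnzsdfls' (HTML_PLACEHOLDER % 0)
    # RawHtmlTextPostprocessor later swaps the placeholder back, or escapes
    # or removes it in safe mode.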
-
-
-class BlockGuru:
-
- def _findHead(self, lines, fn, allowBlank=0):
-
- """Functional magic to help determine boundaries of indented
- blocks.
-
- @param lines: an array of strings
- @param fn: a function that returns a substring of a string
- if the string matches the necessary criteria
- @param allowBlank: specifies whether it's ok to have blank
- lines between matching functions
- @returns: a list of processed items and the unused
- remainder of the original list"""
-
- items = []
- item = -1
-
- i = 0 # to keep track of where we are
-
- for line in lines:
-
- if not line.strip() and not allowBlank:
- return items, lines[i:]
-
- if not line.strip() and allowBlank:
- # If we see a blank line, this _might_ be the end
- i += 1
-
- # Find the next non-blank line
- for j in range(i, len(lines)):
- if lines[j].strip():
- next = lines[j]
- break
- else:
- # There is no more text => this is the end
- break
-
- # Check if the next non-blank line is still a part of the list
-
- part = fn(next)
-
- if part:
- items.append("")
- continue
- else:
- break # found end of the list
-
- part = fn(line)
-
- if part:
- items.append(part)
- i += 1
- continue
- else:
- return items, lines[i:]
- else:
- i += 1
-
- return items, lines[i:]
-
-
- def detabbed_fn(self, line):
- """ An auxiliary method to be passed to _findHead """
- m = RE.regExp['tabbed'].match(line)
- if m:
- return m.group(4)
- else:
- return None
-
-
- def detectTabbed(self, lines):
-
- return self._findHead(lines, self.detabbed_fn,
- allowBlank = 1)
-
-
-def print_error(string):
- """Print an error string to stderr"""
- sys.stderr.write(string +'\n')
-
-
-def dequote(string):
- """ Removes quotes from around a string """
- if ( ( string.startswith('"') and string.endswith('"'))
- or (string.startswith("'") and string.endswith("'")) ):
- return string[1:-1]
- else:
- return string
-
-"""
-======================================================================
-========================== CORE MARKDOWN =============================
-======================================================================
-
-This stuff is ugly, so if you are thinking of extending the syntax,
-see first if you can do it via pre-processors, post-processors,
-inline patterns or a combination of the three.
-"""
-
-class CorePatterns:
- """This class is scheduled for removal as part of a refactoring
- effort."""
-
- patterns = {
- 'header': r'(#*)([^#]*)(#*)', # # A title
- 'reference-def': r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)',
- # [Google]: http://www.google.com/
- 'containsline': r'([-]*)$|^([=]*)', # -----, =====, etc.
- 'ol': r'[ ]{0,3}[\d]*\.\s+(.*)', # 1. text
- 'ul': r'[ ]{0,3}[*+-]\s+(.*)', # "* text"
- 'isline1': r'(\**)', # ***
- 'isline2': r'(\-*)', # ---
- 'isline3': r'(\_*)', # ___
- 'tabbed': r'((\t)|( ))(.*)', # an indented line
- 'quoted': r'> ?(.*)', # a quoted block ("> ...")
- }
-
- def __init__ (self):
-
- self.regExp = {}
- for key in self.patterns.keys():
- self.regExp[key] = re.compile("^%s$" % self.patterns[key],
- re.DOTALL)
-
- self.regExp['containsline'] = re.compile(r'^([-]*)$|^([=]*)$', re.M)
-
-RE = CorePatterns()
-
-
-class Markdown:
- """ Markdown formatter class for creating an html document from
- Markdown text """
-
-
- def __init__(self, source=None, # deprecated
- extensions=[],
- extension_configs=None,
- safe_mode = False):
- """Creates a new Markdown instance.
-
- @param source: The text in Markdown format. Deprecated!
- @param extensions: A list of extensions.
- @param extension_configs: Configuration settings for extensions.
- @param safe_mode: Disallow raw html. """
-
- self.source = source
- if source is not None:
- message(WARN, "The `source` arg of Markdown.__init__() is deprecated and will be removed in the future. Use `instance.convert(source)` instead.")
- self.safeMode = safe_mode
- self.blockGuru = BlockGuru()
- self.registeredExtensions = []
- self.stripTopLevelTags = 1
- self.docType = ""
-
- self.textPreprocessors = [HTML_BLOCK_PREPROCESSOR]
-
- self.preprocessors = [HEADER_PREPROCESSOR,
- LINE_PREPROCESSOR,
- # A footnote preprocessor will
- # get inserted here
- REFERENCE_PREPROCESSOR]
-
-
- self.postprocessors = [] # a footnote postprocessor will get
- # inserted later
-
- self.textPostprocessors = [# a footnote postprocessor will get
- # inserted here
- RAWHTMLTEXTPOSTPROCESSOR]
-
- self.prePatterns = []
-
-
- self.inlinePatterns = [DOUBLE_BACKTICK_PATTERN,
- BACKTICK_PATTERN,
- ESCAPE_PATTERN,
- REFERENCE_PATTERN,
- LINK_ANGLED_PATTERN,
- LINK_PATTERN,
- IMAGE_LINK_PATTERN,
- IMAGE_REFERENCE_PATTERN,
- AUTOLINK_PATTERN,
- AUTOMAIL_PATTERN,
- LINE_BREAK_PATTERN_2,
- LINE_BREAK_PATTERN,
- HTML_PATTERN,
- ENTITY_PATTERN,
- NOT_STRONG_PATTERN,
- STRONG_EM_PATTERN,
- STRONG_EM_PATTERN_2,
- STRONG_PATTERN,
- STRONG_PATTERN_2,
- EMPHASIS_PATTERN,
- EMPHASIS_PATTERN_2
- # The order of the handlers matters!!!
- ]
-
- self.registerExtensions(extensions = extensions,
- configs = extension_configs)
-
- self.reset()
-
-
- def registerExtensions(self, extensions, configs):
-
- if not configs:
- configs = {}
-
- for ext in extensions:
-
- extension_module_name = "mdx_" + ext
-
- try:
- module = __import__(extension_module_name)
-
- except:
- message(CRITICAL,
- "couldn't load extension %s (looking for %s module)"
- % (ext, extension_module_name) )
- else:
-
- if configs.has_key(ext):
- configs_for_ext = configs[ext]
- else:
- configs_for_ext = []
- extension = module.makeExtension(configs_for_ext)
- extension.extendMarkdown(self, globals())
-
-
-
-
- def registerExtension(self, extension):
- """ This gets called by the extension """
- self.registeredExtensions.append(extension)
-
- def reset(self):
- """Resets all state variables so that we can start
- with a new text."""
- self.references={}
- self.htmlStash = HtmlStash()
-
- HTML_BLOCK_PREPROCESSOR.stash = self.htmlStash
- LINE_PREPROCESSOR.stash = self.htmlStash
- REFERENCE_PREPROCESSOR.references = self.references
- HTML_PATTERN.stash = self.htmlStash
- ENTITY_PATTERN.stash = self.htmlStash
- REFERENCE_PATTERN.references = self.references
- IMAGE_REFERENCE_PATTERN.references = self.references
- RAWHTMLTEXTPOSTPROCESSOR.stash = self.htmlStash
- RAWHTMLTEXTPOSTPROCESSOR.safeMode = self.safeMode
-
- for extension in self.registeredExtensions:
- extension.reset()
-
-
- def _transform(self):
- """Transforms the Markdown text into a XHTML body document
-
- @returns: A NanoDom Document """
-
- # Setup the document
-
- self.doc = Document()
- self.top_element = self.doc.createElement("span")
- self.top_element.appendChild(self.doc.createTextNode('\n'))
- self.top_element.setAttribute('class', 'markdown')
- self.doc.appendChild(self.top_element)
-
- # Fixup the source text
- text = self.source
- text = text.replace("\r\n", "\n").replace("\r", "\n")
- text += "\n\n"
- text = text.expandtabs(TAB_LENGTH)
-
- # Split into lines and run the preprocessors that will work with
- # self.lines
-
- self.lines = text.split("\n")
-
- # Run the pre-processors on the lines
- for prep in self.preprocessors :
- self.lines = prep.run(self.lines)
-
- # Create a NanoDom tree from the lines and attach it to Document
-
-
- buffer = []
- for line in self.lines:
- if line.startswith("#"):
- self._processSection(self.top_element, buffer)
- buffer = [line]
- else:
- buffer.append(line)
- self._processSection(self.top_element, buffer)
-
- #self._processSection(self.top_element, self.lines)
-
- # Not sure why I put this in but let's leave it for now.
- self.top_element.appendChild(self.doc.createTextNode('\n'))
-
- # Run the post-processors
- for postprocessor in self.postprocessors:
- postprocessor.run(self.doc)
-
- return self.doc
-
-
- def _processSection(self, parent_elem, lines,
- inList = 0, looseList = 0):
-
- """Process a section of a source document, looking for high
- level structural elements like lists, block quotes, code
- segments, html blocks, etc. Some of those then get stripped
- of their high level markup (e.g. get unindented) and the
- lower-level markup is processed recursively.
-
- @param parent_elem: A NanoDom element to which the content
- will be added
- @param lines: a list of lines
- @param inList: a level
- @returns: None"""
-
- # Loop through lines until none left.
- while lines:
-
- # Check if this section starts with a list, a blockquote or
- # a code block
-
- processFn = { 'ul': self._processUList,
- 'ol': self._processOList,
- 'quoted': self._processQuote,
- 'tabbed': self._processCodeBlock}
-
- for regexp in ['ul', 'ol', 'quoted', 'tabbed']:
- m = RE.regExp[regexp].match(lines[0])
- if m:
- processFn[regexp](parent_elem, lines, inList)
- return
-
- # We are NOT looking at one of the high-level structures like
- # lists or blockquotes. So, it's just a regular paragraph
- # (though perhaps nested inside a list or something else). If
- # we are NOT inside a list, we just need to look for a blank
- # line to find the end of the block. If we ARE inside a
- # list, however, we need to consider that a sublist does not
- # need to be separated by a blank line. Rather, the following
- # markup is legal:
- #
- # * The top level list item
- #
- # Another paragraph of the list. This is where we are now.
- # * Underneath we might have a sublist.
- #
-
- if inList:
-
- start, lines = self._linesUntil(lines, (lambda line:
- RE.regExp['ul'].match(line)
- or RE.regExp['ol'].match(line)
- or not line.strip()))
-
- self._processSection(parent_elem, start,
- inList - 1, looseList = looseList)
- inList = inList-1
-
- else: # Ok, so it's just a simple block
-
- paragraph, lines = self._linesUntil(lines, lambda line:
- not line.strip())
-
- if len(paragraph) and paragraph[0].startswith('#'):
- self._processHeader(parent_elem, paragraph)
-
- elif paragraph:
- self._processParagraph(parent_elem, paragraph,
- inList, looseList)
-
- if lines and not lines[0].strip():
- lines = lines[1:] # skip the first (blank) line
-
-
- def _processHeader(self, parent_elem, paragraph):
- m = RE.regExp['header'].match(paragraph[0])
- if m:
- level = len(m.group(1))
- h = self.doc.createElement("h%d" % level)
- parent_elem.appendChild(h)
- for item in self._handleInline(m.group(2).strip()):
- h.appendChild(item)
- else:
- message(CRITICAL, "We've got a problem header!")
-
-
- def _processParagraph(self, parent_elem, paragraph, inList, looseList):
- list = self._handleInline("\n".join(paragraph))
-
- if ( parent_elem.nodeName == 'li'
- and not (looseList or parent_elem.childNodes)):
-
- # If this is the first paragraph inside "li", don't
- # put <p> around it - append the paragraph bits directly
- # onto parent_elem
- el = parent_elem
- else:
- # Otherwise make a "p" element
- el = self.doc.createElement("p")
- parent_elem.appendChild(el)
-
- for item in list:
- el.appendChild(item)
-
-
- def _processUList(self, parent_elem, lines, inList):
- self._processList(parent_elem, lines, inList,
- listexpr='ul', tag = 'ul')
-
- def _processOList(self, parent_elem, lines, inList):
- self._processList(parent_elem, lines, inList,
- listexpr='ol', tag = 'ol')
-
-
- def _processList(self, parent_elem, lines, inList, listexpr, tag):
- """Given a list of document lines starting with a list item,
- finds the end of the list, breaks it up, and recursively
- processes each list item and the remainder of the text file.
-
- @param parent_elem: A dom element to which the content will be added
- @param lines: a list of lines
- @param inList: a level
- @returns: None"""
-
- ul = self.doc.createElement(tag) # ul might actually be '<ol>'
- parent_elem.appendChild(ul)
-
- looseList = 0
-
- # Make a list of list items
- items = []
- item = -1
-
- i = 0 # a counter to keep track of where we are
-
- for line in lines:
-
- loose = 0
- if not line.strip():
- # If we see a blank line, this _might_ be the end of the list
- i += 1
- loose = 1
-
- # Find the next non-blank line
- for j in range(i, len(lines)):
- if lines[j].strip():
- next = lines[j]
- break
- else:
- # There is no more text => end of the list
- break
-
- # Check if the next non-blank line is still a part of the list
- if ( RE.regExp['ul'].match(next) or
- RE.regExp['ol'].match(next) or
- RE.regExp['tabbed'].match(next) ):
- # get rid of any white space in the line
- items[item].append(line.strip())
- looseList = loose or looseList
- continue
- else:
- break # found end of the list
-
- # Now we need to detect list items (at the current level)
- # while also detabbing child elements if necessary
-
- for expr in ['ul', 'ol', 'tabbed']:
-
- m = RE.regExp[expr].match(line)
- if m:
- if expr in ['ul', 'ol']: # We are looking at a new item
- #if m.group(1) :
- # Removed the check to allow for a blank line
- # at the beginning of the list item
- items.append([m.group(1)])
- item += 1
- elif expr == 'tabbed': # This line needs to be detabbed
- items[item].append(m.group(4)) #after the 'tab'
-
- i += 1
- break
- else:
- items[item].append(line) # Just regular continuation
- i += 1 # added on 2006.02.25
- else:
- i += 1
-
- # Add the dom elements
- for item in items:
- li = self.doc.createElement("li")
- ul.appendChild(li)
-
- self._processSection(li, item, inList + 1, looseList = looseList)
-
- # Process the remaining part of the section
-
- self._processSection(parent_elem, lines[i:], inList)
-
-
- def _linesUntil(self, lines, condition):
- """ A utility function to break a list of lines upon the
- first line that satisfies a condition. The condition
- argument should be a predicate function.
- """
-
- i = -1
- for line in lines:
- i += 1
- if condition(line): break
- else:
- i += 1
- return lines[:i], lines[i:]
-
- def _processQuote(self, parent_elem, lines, inList):
- """Given a list of document lines starting with a quote finds
- the end of the quote, unindents it and recursively
- processes the body of the quote and the remainder of the
- text file.
-
- @param parent_elem: DOM element to which the content will be added
- @param lines: a list of lines
- @param inList: a level
- @returns: None """
-
- dequoted = []
- i = 0
- blank_line = False # allow one blank line between paragraphs
- for line in lines:
- m = RE.regExp['quoted'].match(line)
- if m:
- dequoted.append(m.group(1))
- i += 1
- blank_line = False
- elif not blank_line and line.strip() != '':
- dequoted.append(line)
- i += 1
- elif not blank_line and line.strip() == '':
- dequoted.append(line)
- i += 1
- blank_line = True
- else:
- break
-
- blockquote = self.doc.createElement('blockquote')
- parent_elem.appendChild(blockquote)
-
- self._processSection(blockquote, dequoted, inList)
- self._processSection(parent_elem, lines[i:], inList)
-
-
-
-
- def _processCodeBlock(self, parent_elem, lines, inList):
- """Given a list of document lines starting with a code block
- finds the end of the block, puts it into the dom verbatim
- wrapped in ("<pre><code>") and recursively processes the
- the remainder of the text file.
-
- @param parent_elem: DOM element to which the content will be added
- @param lines: a list of lines
- @param inList: a level
- @returns: None"""
-
- detabbed, theRest = self.blockGuru.detectTabbed(lines)
-
- pre = self.doc.createElement('pre')
- code = self.doc.createElement('code')
- parent_elem.appendChild(pre)
- pre.appendChild(code)
- text = "\n".join(detabbed).rstrip()+"\n"
- #text = text.replace("&", "&amp;")
- code.appendChild(self.doc.createTextNode(text))
- self._processSection(parent_elem, theRest, inList)
-
-
-
- def _handleInline (self, line, patternIndex=0):
- """Transform a Markdown line with inline elements to an XHTML
- fragment.
-
- This function uses auxiliary objects called inline patterns.
- See notes on inline patterns above.
-
- @param line: A line of Markdown text
- @param patternIndex: The index of the inlinePattern to start with
- @return: A list of NanoDom nodes """
-
-
- parts = [line]
-
- while patternIndex < len(self.inlinePatterns):
-
- i = 0
-
- while i < len(parts):
-
- x = parts[i]
-
- if isinstance(x, (str, unicode)):
- result = self._applyPattern(x, \
- self.inlinePatterns[patternIndex], \
- patternIndex)
-
- if result:
- i -= 1
- parts.remove(x)
- for y in result:
- parts.insert(i+1,y)
-
- i += 1
- patternIndex += 1
-
- for i in range(len(parts)):
- x = parts[i]
- if isinstance(x, (str, unicode)):
- parts[i] = self.doc.createTextNode(x)
-
- return parts
-
-
- def _applyPattern(self, line, pattern, patternIndex):
-
- """ Given a pattern name, this function checks if the line
- fits the pattern, creates the necessary elements, and returns
- back a list consisting of NanoDom elements and/or strings.
-
- @param line: the text to be processed
- @param pattern: the pattern to be checked
-
- @returns: the appropriate newly created NanoDom element if the
- pattern matches, None otherwise.
- """
-
- # match the line to pattern's pre-compiled reg exp.
- # if no match, move on.
-
-
-
- m = pattern.getCompiledRegExp().match(line)
- if not m:
- return None
-
- # if we got a match let the pattern make us a NanoDom node
- # if it doesn't, move on
- node = pattern.handleMatch(m, self.doc)
-
- # check if any of this nodes have children that need processing
-
- if isinstance(node, Element):
-
- if not node.nodeName in ["code", "pre"]:
- for child in node.childNodes:
- if isinstance(child, TextNode):
-
- result = self._handleInline(child.value, patternIndex+1)
-
- if result:
-
- if result == [child]:
- continue
-
- result.reverse()
- #to make insertion easier
-
- position = node.childNodes.index(child)
-
- node.removeChild(child)
-
- for item in result:
-
- if isinstance(item, (str, unicode)):
- if len(item) > 0:
- node.insertChild(position,
- self.doc.createTextNode(item))
- else:
- node.insertChild(position, item)
-
-
-
-
- if node:
- # Those are in the reverse order!
- return ( m.groups()[-1], # the string to the left
- node, # the new node
- m.group(1)) # the string to the right of the match
-
- else:
- return None
-
- def convert (self, source = None):
- """Return the document in XHTML format.
-
- @returns: A serialized XHTML body."""
-
- if source is not None: #Allow blank string
- self.source = source
-
- if not self.source:
- return u""
-
- try:
- self.source = unicode(self.source)
- except UnicodeDecodeError:
- message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
- return u""
-
- for pp in self.textPreprocessors:
- self.source = pp.run(self.source)
-
- doc = self._transform()
- xml = doc.toxml()
-
-
- # Return everything but the top level tag
-
- if self.stripTopLevelTags:
- xml = xml.strip()[23:-7] + "\n"
-
- for pp in self.textPostprocessors:
- xml = pp.run(xml)
-
- return (self.docType + xml).strip()
-
-
- def __str__(self):
- ''' Report info about instance. Markdown always returns unicode. '''
- if self.source is None:
- status = 'in which no source text has been assigned.'
- else:
- status = 'which contains %d chars and %d line(s) of source.'%\
- (len(self.source), self.source.count('\n')+1)
- return 'An instance of "%s" %s'% (self.__class__, status)
-
- __unicode__ = convert # markdown should always return a unicode string
-
-
-
-
-
-# ====================================================================
-
-def markdownFromFile(input = None,
- output = None,
- extensions = [],
- encoding = None,
- message_threshold = CRITICAL,
- safe = False):
-
- global console_hndlr
- console_hndlr.setLevel(message_threshold)
-
- message(DEBUG, "input file: %s" % input)
-
- if not encoding:
- encoding = "utf-8"
-
- input_file = codecs.open(input, mode="r", encoding=encoding)
- text = input_file.read()
- input_file.close()
-
- text = removeBOM(text, encoding)
-
- new_text = markdown(text, extensions, safe_mode = safe)
-
- if output:
- output_file = codecs.open(output, "w", encoding=encoding)
- output_file.write(new_text)
- output_file.close()
-
- else:
- sys.stdout.write(new_text.encode(encoding))
-
-def markdown(text,
- extensions = [],
- safe_mode = False):
-
- message(DEBUG, "in markdown.markdown(), received text:\n%s" % text)
-
- extension_names = []
- extension_configs = {}
-
- for ext in extensions:
- pos = ext.find("(")
- if pos == -1:
- extension_names.append(ext)
- else:
- name = ext[:pos]
- extension_names.append(name)
- pairs = [x.split("=") for x in ext[pos+1:-1].split(",")]
- configs = [(x.strip(), y.strip()) for (x, y) in pairs]
- extension_configs[name] = configs
-
- md = Markdown(extensions=extension_names,
- extension_configs=extension_configs,
- safe_mode = safe_mode)
-
- return md.convert(text)
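
The string parsing above accepts inline extension configuration. A hedged example of the accepted shape (the extension name and option are illustrative, drawn from the footnote extension mentioned earlier; the mdx_footnotes module must be importable for this to load):

    # parsed as name 'footnotes' with configs [('PLACE_MARKER', '~~~')]
    html = markdown(text, extensions=['footnotes(PLACE_MARKER=~~~)'])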
-
-
-class Extension:
-
- def __init__(self, configs = {}):
- self.config = configs
-
- def getConfig(self, key):
- if self.config.has_key(key):
- return self.config[key][0]
- else:
- return ""
-
- def getConfigInfo(self):
- return [(key, self.config[key][1]) for key in self.config.keys()]
-
- def setConfig(self, key, value):
- self.config[key][0] = value
-
-
-OPTPARSE_WARNING = """
-Python 2.3 or higher required for advanced command line options.
-For lower versions of Python use:
-
- %s INPUT_FILE > OUTPUT_FILE
-
-""" % EXECUTABLE_NAME_FOR_USAGE
-
-def parse_options():
-
- try:
- optparse = __import__("optparse")
- except:
- if len(sys.argv) == 2:
- return {'input': sys.argv[1],
- 'output': None,
- 'message_threshold': CRITICAL,
- 'safe': False,
- 'extensions': [],
- 'encoding': None }
-
- else:
- print OPTPARSE_WARNING
- return None
-
- parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")
-
- parser.add_option("-f", "--file", dest="filename",
- help="write output to OUTPUT_FILE",
- metavar="OUTPUT_FILE")
- parser.add_option("-e", "--encoding", dest="encoding",
- help="encoding for input and output files",)
- parser.add_option("-q", "--quiet", default = CRITICAL,
- action="store_const", const=60, dest="verbose",
- help="suppress all messages")
- parser.add_option("-v", "--verbose",
- action="store_const", const=INFO, dest="verbose",
- help="print info messages")
- parser.add_option("-s", "--safe", dest="safe", default=False,
- metavar="SAFE_MODE",
- help="same mode ('replace', 'remove' or 'escape' user's HTML tag)")
-
- parser.add_option("--noisy",
- action="store_const", const=DEBUG, dest="verbose",
- help="print debug messages")
- parser.add_option("-x", "--extension", action="append", dest="extensions",
- help = "load extension EXTENSION", metavar="EXTENSION")
-
- (options, args) = parser.parse_args()
-
- if not len(args) == 1:
- parser.print_help()
- return None
- else:
- input_file = args[0]
-
- if not options.extensions:
- options.extensions = []
-
- return {'input': input_file,
- 'output': options.filename,
- 'message_threshold': options.verbose,
- 'safe': options.safe,
- 'extensions': options.extensions,
- 'encoding': options.encoding }
-
-if __name__ == '__main__':
- """ Run Markdown from the command line. """
-
- options = parse_options()
-
- #if os.access(inFile, os.R_OK):
-
- if not options:
- sys.exit(0)
-
- markdownFromFile(**options)
-
-
-
-
-
-
-
-
-
-
diff --git a/lib/markdown2.py b/lib/markdown2.py
@@ -0,0 +1,1846 @@
+#!/usr/bin/env python
+# Copyright (c) 2007-2008 ActiveState Corp.
+# License: MIT (http://www.opensource.org/licenses/mit-license.php)
+
+r"""A fast and complete Python implementation of Markdown.
+
+[from http://daringfireball.net/projects/markdown/]
+> Markdown is a text-to-HTML filter; it translates an easy-to-read /
+> easy-to-write structured text format into HTML. Markdown's text
+> format is most similar to that of plain text email, and supports
+> features such as headers, *emphasis*, code blocks, blockquotes, and
+> links.
+>
+> Markdown's syntax is designed not as a generic markup language, but
+> specifically to serve as a front-end to (X)HTML. You can use span-level
+> HTML tags anywhere in a Markdown document, and you can use block level
+> HTML tags (like <div> and <table> as well).
+
+Module usage:
+
+ >>> import markdown2
+ >>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
+ u'<p><em>boo!</em></p>\n'
+
+ >>> markdowner = Markdown()
+ >>> markdowner.convert("*boo!*")
+ u'<p><em>boo!</em></p>\n'
+ >>> markdowner.convert("**boom!**")
+ u'<p><strong>boom!</strong></p>\n'
+
+This implementation of Markdown implements the full "core" syntax plus a
+number of extras (e.g., code syntax coloring, footnotes) as described on
+<http://code.google.com/p/python-markdown2/wiki/Extras>.
+"""
+
+cmdln_desc = """A fast and complete Python implementation of Markdown, a
+text-to-HTML conversion tool for web writers.
+"""
+
+# Dev Notes:
+# - There is already a Python markdown processor
+# (http://www.freewisdom.org/projects/python-markdown/).
+# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
+# not yet sure if there are implications with this. Compare 'pydoc sre'
+# and 'perldoc perlre'.
+
+__version_info__ = (1, 0, 1, 9) # first three nums match Markdown.pl
+__version__ = '.'.join(map(str, __version_info__))
+__author__ = "Trent Mick"
+
+import os
+import sys
+from pprint import pprint
+import re
+import logging
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+import optparse
+from random import random
+import codecs
+
+
+
+#---- Python version compat
+
+if sys.version_info[:2] < (2,4):
+ from sets import Set as set
+ def reversed(sequence):
+ for i in sequence[::-1]:
+ yield i
+ def _unicode_decode(s, encoding, errors='xmlcharrefreplace'):
+ return unicode(s, encoding, errors)
+else:
+ def _unicode_decode(s, encoding, errors='strict'):
+ return s.decode(encoding, errors)
+
+
+#---- globals
+
+DEBUG = False
+log = logging.getLogger("markdown")
+
+DEFAULT_TAB_WIDTH = 4
+
+# Table of hash values for escaped characters:
+def _escape_hash(s):
+ # Lame attempt to avoid possible collision with someone actually
+ # using the MD5 hexdigest of one of these chars in their text.
+ # Other ideas: random.random(), uuid.uuid()
+ #return md5(s).hexdigest() # Markdown.pl effectively does this.
+ return 'md5:'+md5(s).hexdigest()
+g_escape_table = dict([(ch, _escape_hash(ch))
+ for ch in '\\`*_{}[]()>#+-.!'])
+
+
+
+#---- exceptions
+
+class MarkdownError(Exception):
+ pass
+
+
+
+#---- public api
+
+def markdown_path(path, encoding="utf-8",
+ html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
+ safe_mode=None, extras=None, link_patterns=None,
+ use_file_vars=False):
+ text = codecs.open(path, 'r', encoding).read()
+ return Markdown(html4tags=html4tags, tab_width=tab_width,
+ safe_mode=safe_mode, extras=extras,
+ link_patterns=link_patterns,
+ use_file_vars=use_file_vars).convert(text)
+
+def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
+ safe_mode=None, extras=None, link_patterns=None,
+ use_file_vars=False):
+ return Markdown(html4tags=html4tags, tab_width=tab_width,
+ safe_mode=safe_mode, extras=extras,
+ link_patterns=link_patterns,
+ use_file_vars=use_file_vars).convert(text)
+
+class Markdown(object):
+ # The dict of "extras" to enable in processing -- a mapping of
+ # extra name to argument for the extra. Most extras do not have an
+ # argument, in which case the value is None.
+ #
+ # This can be set via (a) subclassing and (b) the constructor
+ # "extras" argument.
+ extras = None
+
+ urls = None
+ titles = None
+ html_blocks = None
+ html_spans = None
+ html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
+
+ # Used to track when we're inside an ordered or unordered list
+ # (see _ProcessListItems() for details):
+ list_level = 0
+
+ _ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
+
+ def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
+ extras=None, link_patterns=None, use_file_vars=False):
+ if html4tags:
+ self.empty_element_suffix = ">"
+ else:
+ self.empty_element_suffix = " />"
+ self.tab_width = tab_width
+
+ # For compatibility with earlier markdown2.py and with
+ # markdown.py's safe_mode being a boolean,
+ # safe_mode == True -> "replace"
+ if safe_mode is True:
+ self.safe_mode = "replace"
+ else:
+ self.safe_mode = safe_mode
+
+ if self.extras is None:
+ self.extras = {}
+ elif not isinstance(self.extras, dict):
+ self.extras = dict([(e, None) for e in self.extras])
+ if extras:
+ if not isinstance(extras, dict):
+ extras = dict([(e, None) for e in extras])
+ self.extras.update(extras)
+ assert isinstance(self.extras, dict)
+ self._instance_extras = self.extras.copy()
+ self.link_patterns = link_patterns
+ self.use_file_vars = use_file_vars
+ self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
+
+ def reset(self):
+ self.urls = {}
+ self.titles = {}
+ self.html_blocks = {}
+ self.html_spans = {}
+ self.list_level = 0
+ self.extras = self._instance_extras.copy()
+ if "footnotes" in self.extras:
+ self.footnotes = {}
+ self.footnote_ids = []
+
+ def convert(self, text):
+ """Convert the given text."""
+ # Main function. The order in which other subs are called here is
+ # essential. Link and image substitutions need to happen before
+ # _EscapeSpecialChars(), so that any *'s or _'s in the <a>
+ # and <img> tags get encoded.
+
+ # Clear the global hashes. If we don't clear these, you get conflicts
+ # from other articles when generating a page which contains more than
+ # one article (e.g. an index page that shows the N most recent
+ # articles):
+ self.reset()
+
+ if not isinstance(text, unicode):
+ #TODO: perhaps shouldn't presume UTF-8 for string input?
+ text = unicode(text, 'utf-8')
+
+ if self.use_file_vars:
+ # Look for emacs-style file variable hints.
+ emacs_vars = self._get_emacs_vars(text)
+ if "markdown-extras" in emacs_vars:
+ splitter = re.compile("[ ,]+")
+ for e in splitter.split(emacs_vars["markdown-extras"]):
+ if '=' in e:
+ ename, earg = e.split('=', 1)
+ try:
+ earg = int(earg)
+ except ValueError:
+ pass
+ else:
+ ename, earg = e, None
+ self.extras[ename] = earg
+
+ # Standardize line endings:
+ text = re.sub("\r\n|\r", "\n", text)
+
+        # Make sure the text ends with a couple of newlines:
+ text += "\n\n"
+
+ # Convert all tabs to spaces.
+ text = self._detab(text)
+
+ # Strip any lines consisting only of spaces and tabs.
+ # This makes subsequent regexen easier to write, because we can
+ # match consecutive blank lines with /\n+/ instead of something
+ # contorted like /[ \t]*\n+/ .
+ text = self._ws_only_line_re.sub("", text)
+
+ if self.safe_mode:
+ text = self._hash_html_spans(text)
+
+ # Turn block-level HTML blocks into hash entries
+ text = self._hash_html_blocks(text, raw=True)
+
+ # Strip link definitions, store in hashes.
+ if "footnotes" in self.extras:
+ # Must do footnotes first because an unlucky footnote defn
+ # looks like a link defn:
+ # [^4]: this "looks like a link defn"
+ text = self._strip_footnote_definitions(text)
+ text = self._strip_link_definitions(text)
+
+ text = self._run_block_gamut(text)
+
+ text = self._unescape_special_chars(text)
+
+ if "footnotes" in self.extras:
+ text = self._add_footnotes(text)
+
+ if self.safe_mode:
+ text = self._unhash_html_spans(text)
+
+ text += "\n"
+ return text
+
+ _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
+ # This regular expression is intended to match blocks like this:
+ # PREFIX Local Variables: SUFFIX
+ # PREFIX mode: Tcl SUFFIX
+ # PREFIX End: SUFFIX
+ # Some notes:
+ # - "[ \t]" is used instead of "\s" to specifically exclude newlines
+ # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
+ # not like anything other than Unix-style line terminators.
+ _emacs_local_vars_pat = re.compile(r"""^
+        (?P<prefix>[^\r\n]*?)
+ [\ \t]*Local\ Variables:[\ \t]*
+ (?P<suffix>.*?)(?:\r\n|\n|\r)
+ (?P<content>.*?\1End:)
+ """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
+
+ def _get_emacs_vars(self, text):
+ """Return a dictionary of emacs-style local variables.
+
+ Parsing is done loosely according to this spec (and according to
+ some in-practice deviations from this):
+ http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
+ """
+ emacs_vars = {}
+ SIZE = pow(2, 13) # 8kB
+
+ # Search near the start for a '-*-'-style one-liner of variables.
+ head = text[:SIZE]
+ if "-*-" in head:
+ match = self._emacs_oneliner_vars_pat.search(head)
+ if match:
+ emacs_vars_str = match.group(1)
+ assert '\n' not in emacs_vars_str
+ emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
+ if s.strip()]
+ if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
+ # While not in the spec, this form is allowed by emacs:
+ # -*- Tcl -*-
+ # where the implied "variable" is "mode". This form
+ # is only allowed if there are no other variables.
+ emacs_vars["mode"] = emacs_var_strs[0].strip()
+ else:
+ for emacs_var_str in emacs_var_strs:
+ try:
+ variable, value = emacs_var_str.strip().split(':', 1)
+ except ValueError:
+ log.debug("emacs variables error: malformed -*- "
+ "line: %r", emacs_var_str)
+ continue
+ # Lowercase the variable name because Emacs allows "Mode"
+ # or "mode" or "MoDe", etc.
+ emacs_vars[variable.lower()] = value.strip()
+
+ tail = text[-SIZE:]
+ if "Local Variables" in tail:
+ match = self._emacs_local_vars_pat.search(tail)
+ if match:
+ prefix = match.group("prefix")
+ suffix = match.group("suffix")
+ lines = match.group("content").splitlines(0)
+ #print "prefix=%r, suffix=%r, content=%r, lines: %s"\
+ # % (prefix, suffix, match.group("content"), lines)
+
+ # Validate the Local Variables block: proper prefix and suffix
+ # usage.
+ for i, line in enumerate(lines):
+ if not line.startswith(prefix):
+ log.debug("emacs variables error: line '%s' "
+ "does not use proper prefix '%s'"
+ % (line, prefix))
+ return {}
+ # Don't validate suffix on last line. Emacs doesn't care,
+ # neither should we.
+ if i != len(lines)-1 and not line.endswith(suffix):
+ log.debug("emacs variables error: line '%s' "
+ "does not use proper suffix '%s'"
+ % (line, suffix))
+ return {}
+
+ # Parse out one emacs var per line.
+ continued_for = None
+ for line in lines[:-1]: # no var on the last line ("PREFIX End:")
+ if prefix: line = line[len(prefix):] # strip prefix
+ if suffix: line = line[:-len(suffix)] # strip suffix
+ line = line.strip()
+ if continued_for:
+ variable = continued_for
+ if line.endswith('\\'):
+ line = line[:-1].rstrip()
+ else:
+ continued_for = None
+ emacs_vars[variable] += ' ' + line
+ else:
+ try:
+ variable, value = line.split(':', 1)
+ except ValueError:
+ log.debug("local variables error: missing colon "
+ "in local variables entry: '%s'" % line)
+ continue
+ # Do NOT lowercase the variable name, because Emacs only
+ # allows "mode" (and not "Mode", "MoDe", etc.) in this block.
+ value = value.strip()
+ if value.endswith('\\'):
+ value = value[:-1].rstrip()
+ continued_for = variable
+ else:
+ continued_for = None
+ emacs_vars[variable] = value
+
+ # Unquote values.
+ for var, val in emacs_vars.items():
+            if len(val) > 1 and (val.startswith('"') and val.endswith('"')
+                or val.startswith("'") and val.endswith("'")):
+ emacs_vars[var] = val[1:-1]
+
+ return emacs_vars
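+
+    # For example (an illustrative sketch): a document beginning with
+    #   <!-- -*- markdown-extras: footnotes, code-color -*- -->
+    # yields {"markdown-extras": "footnotes, code-color"}, which convert()
+    # (when use_file_vars=True) splits into the 'footnotes' and
+    # 'code-color' extras.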
+
+ # Cribbed from a post by Bart Lateur:
+ # <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
+ _detab_re = re.compile(r'(.*?)\t', re.M)
+ def _detab_sub(self, match):
+ g1 = match.group(1)
+ return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
+    def _detab(self, text):
+        r"""Expand tabs in the text to spaces (tab stop = self.tab_width).
+
+ >>> m = Markdown()
+ >>> m._detab("\tfoo")
+ ' foo'
+ >>> m._detab(" \tfoo")
+ ' foo'
+ >>> m._detab("\t foo")
+ ' foo'
+ >>> m._detab(" foo")
+ ' foo'
+ >>> m._detab(" foo\n\tbar\tblam")
+ ' foo\n bar blam'
+ """
+ if '\t' not in text:
+ return text
+ return self._detab_re.subn(self._detab_sub, text)[0]
+
+ _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
+ _strict_tag_block_re = re.compile(r"""
+ ( # save in \1
+ ^ # start of line (with re.M)
+ <(%s) # start tag = \2
+ \b # word break
+ (.*\n)*? # any number of lines, minimally matching
+ </\2> # the matching end tag
+ [ \t]* # trailing spaces/tabs
+ (?=\n+|\Z) # followed by a newline or end of document
+ )
+ """ % _block_tags_a,
+ re.X | re.M)
+
+ _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
+ _liberal_tag_block_re = re.compile(r"""
+ ( # save in \1
+ ^ # start of line (with re.M)
+ <(%s) # start tag = \2
+ \b # word break
+ (.*\n)*? # any number of lines, minimally matching
+ .*</\2> # the matching end tag
+ [ \t]* # trailing spaces/tabs
+ (?=\n+|\Z) # followed by a newline or end of document
+ )
+ """ % _block_tags_b,
+ re.X | re.M)
+
+    # Saved for use in the coming 'xml' extra.
+ XXX_liberal_tag_block_re = re.compile(r"""
+ ( # save in \1
+ ^ # start of line (with re.M)
+ (?:
+ <(%s|\w+:\w+) # start tag = \2
+ \b # word break
+ (?:.*\n)*? # any number of lines, minimally matching
+ .*</\2> # the matching end tag
+ |
+ <(\w+:)?\w+ # single tag-start
+ \b # word break
+ .*? # any content on one line, minimally matching
+ /> # end of tag
+ |
+ <\?\w+ # start of processing instruction
+ \b # word break
+ .*? # any content on one line, minimally matching
+ \?> # the PI end tag
+ )
+ [ \t]* # trailing spaces/tabs
+ (?=\n+|\Z) # followed by a newline or end of document
+ )
+ """ % _block_tags_b,
+ re.X | re.M)
+
+ def _hash_html_block_sub(self, match, raw=False):
+ html = match.group(1)
+ if raw and self.safe_mode:
+ html = self._sanitize_html(html)
+ key = _hash_text(html)
+ self.html_blocks[key] = html
+ return "\n\n" + key + "\n\n"
+
+ def _hash_html_blocks(self, text, raw=False):
+ """Hashify HTML blocks
+
+ We only want to do this for block-level HTML tags, such as headers,
+ lists, and tables. That's because we still want to wrap <p>s around
+ "paragraphs" that are wrapped in non-block-level tags, such as anchors,
+ phrase emphasis, and spans. The list of tags we're looking for is
+ hard-coded.
+
+ @param raw {boolean} indicates if these are raw HTML blocks in
+ the original source. It makes a difference in "safe" mode.
+ """
+ if '<' not in text:
+ return text
+
+ # Pass `raw` value into our calls to self._hash_html_block_sub.
+ hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
+
+ # First, look for nested blocks, e.g.:
+ # <div>
+ # <div>
+ # tags for inner block must be indented.
+ # </div>
+ # </div>
+ #
+ # The outermost tags must start at the left margin for this to match, and
+ # the inner nested divs must be indented.
+ # We need to do this before the next, more liberal match, because the next
+ # match will start at the first `<div>` and stop at the first `</div>`.
+ text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
+
+ # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
+ text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
+
+ # Special case just for <hr />. It was easier to make a special
+ # case than to make the other regex more complicated.
+ if "<hr" in text:
+ _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
+ text = _hr_tag_re.sub(hash_html_block_sub, text)
+
+ # Special case for standalone HTML comments:
+ if "<!--" in text:
+ start = 0
+ while True:
+ # Delimiters for next comment block.
+ try:
+ start_idx = text.index("<!--", start)
+ except ValueError, ex:
+ break
+ try:
+ end_idx = text.index("-->", start_idx) + 3
+ except ValueError, ex:
+ break
+
+ # Start position for next comment block search.
+ start = end_idx
+
+ # Validate whitespace before comment.
+ if start_idx:
+ # - Up to `tab_width - 1` spaces before start_idx.
+ for i in range(self.tab_width - 1):
+ if text[start_idx - 1] != ' ':
+ break
+ start_idx -= 1
+ if start_idx == 0:
+ break
+ # - Must be preceded by 2 newlines or hit the start of
+ # the document.
+ if start_idx == 0:
+ pass
+ elif start_idx == 1 and text[0] == '\n':
+ start_idx = 0 # to match minute detail of Markdown.pl regex
+ elif text[start_idx-2:start_idx] == '\n\n':
+ pass
+ else:
+ break
+
+ # Validate whitespace after comment.
+ # - Any number of spaces and tabs.
+ while end_idx < len(text):
+ if text[end_idx] not in ' \t':
+ break
+ end_idx += 1
+                # - Must be followed by 2 newlines or hit end of text.
+ if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
+ continue
+
+ # Escape and hash (must match `_hash_html_block_sub`).
+ html = text[start_idx:end_idx]
+ if raw and self.safe_mode:
+ html = self._sanitize_html(html)
+ key = _hash_text(html)
+ self.html_blocks[key] = html
+ text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
+
+ return text
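+
+    # For example (a sketch): a top-level block such as
+    #   <div>foo</div>
+    # is replaced in the text by a key of the form md5:<32 hex digits>,
+    # and the original HTML is stashed in self.html_blocks under that
+    # key until _form_paragraphs() swaps it back in.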
+
+ def _strip_link_definitions(self, text):
+        # Strips link definitions from text, storing the URLs and titles
+        # in self.urls and self.titles.
+ less_than_tab = self.tab_width - 1
+
+ # Link defs are in the form:
+ # [id]: url "optional title"
+ _link_def_re = re.compile(r"""
+ ^[ ]{0,%d}\[(.+)\]: # id = \1
+ [ \t]*
+ \n? # maybe *one* newline
+ [ \t]*
+ <?(.+?)>? # url = \2
+ [ \t]*
+ (?:
+ \n? # maybe one newline
+ [ \t]*
+ (?<=\s) # lookbehind for whitespace
+ ['"(]
+ ([^\n]*) # title = \3
+ ['")]
+ [ \t]*
+ )? # title is optional
+ (?:\n+|\Z)
+ """ % less_than_tab, re.X | re.M | re.U)
+ return _link_def_re.sub(self._extract_link_def_sub, text)
+
+ def _extract_link_def_sub(self, match):
+ id, url, title = match.groups()
+ key = id.lower() # Link IDs are case-insensitive
+ self.urls[key] = self._encode_amps_and_angles(url)
+ if title:
+            self.titles[key] = title.replace('"', '&quot;')
+ return ""
+
+ def _extract_footnote_def_sub(self, match):
+ id, text = match.groups()
+ text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
+ normed_id = re.sub(r'\W', '-', id)
+ # Ensure footnote text ends with a couple newlines (for some
+ # block gamut matches).
+ self.footnotes[normed_id] = text + "\n\n"
+ return ""
+
+ def _strip_footnote_definitions(self, text):
+ """A footnote definition looks like this:
+
+ [^note-id]: Text of the note.
+
+ May include one or more indented paragraphs.
+
+ Where,
+ - The 'note-id' can be pretty much anything, though typically it
+ is the number of the footnote.
+ - The first paragraph may start on the next line, like so:
+
+ [^note-id]:
+ Text of the note.
+ """
+ less_than_tab = self.tab_width - 1
+ footnote_def_re = re.compile(r'''
+ ^[ ]{0,%d}\[\^(.+)\]: # id = \1
+ [ \t]*
+ ( # footnote text = \2
+ # First line need not start with the spaces.
+ (?:\s*.*\n+)
+ (?:
+ (?:[ ]{%d} | \t) # Subsequent lines must be indented.
+ .*\n+
+ )*
+ )
+ # Lookahead for non-space at line-start, or end of doc.
+ (?:(?=^[ ]{0,%d}\S)|\Z)
+ ''' % (less_than_tab, self.tab_width, self.tab_width),
+ re.X | re.M)
+ return footnote_def_re.sub(self._extract_footnote_def_sub, text)
+
+
+ _hr_res = [
+ re.compile(r"^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$", re.M),
+ re.compile(r"^[ ]{0,2}([ ]?\-[ ]?){3,}[ \t]*$", re.M),
+ re.compile(r"^[ ]{0,2}([ ]?\_[ ]?){3,}[ \t]*$", re.M),
+ ]
+
+ def _run_block_gamut(self, text):
+ # These are all the transformations that form block-level
+ # tags like paragraphs, headers, and list items.
+
+ text = self._do_headers(text)
+
+ # Do Horizontal Rules:
+ hr = "\n<hr"+self.empty_element_suffix+"\n"
+ for hr_re in self._hr_res:
+ text = hr_re.sub(hr, text)
+
+ text = self._do_lists(text)
+
+ if "pyshell" in self.extras:
+ text = self._prepare_pyshell_blocks(text)
+
+ text = self._do_code_blocks(text)
+
+ text = self._do_block_quotes(text)
+
+ # We already ran _HashHTMLBlocks() before, in Markdown(), but that
+ # was to escape raw HTML in the original Markdown source. This time,
+ # we're escaping the markup we've just created, so that we don't wrap
+ # <p> tags around block-level tags.
+ text = self._hash_html_blocks(text)
+
+ text = self._form_paragraphs(text)
+
+ return text
+
+ def _pyshell_block_sub(self, match):
+ lines = match.group(0).splitlines(0)
+ _dedentlines(lines)
+ indent = ' ' * self.tab_width
+ s = ('\n' # separate from possible cuddled paragraph
+ + indent + ('\n'+indent).join(lines)
+ + '\n\n')
+ return s
+
+ def _prepare_pyshell_blocks(self, text):
+ """Ensure that Python interactive shell sessions are put in
+ code blocks -- even if not properly indented.
+ """
+ if ">>>" not in text:
+ return text
+
+ less_than_tab = self.tab_width - 1
+ _pyshell_block_re = re.compile(r"""
+ ^([ ]{0,%d})>>>[ ].*\n # first line
+ ^(\1.*\S+.*\n)* # any number of subsequent lines
+ ^\n # ends with a blank line
+ """ % less_than_tab, re.M | re.X)
+
+ return _pyshell_block_re.sub(self._pyshell_block_sub, text)
+
+ def _run_span_gamut(self, text):
+ # These are all the transformations that occur *within* block-level
+ # tags like paragraphs, headers, and list items.
+
+ text = self._do_code_spans(text)
+
+ text = self._escape_special_chars(text)
+
+ # Process anchor and image tags.
+ text = self._do_links(text)
+
+ # Make links out of things like `<http://example.com/>`
+ # Must come after _do_links(), because you can use < and >
+ # delimiters in inline links like [this](<url>).
+ text = self._do_auto_links(text)
+
+ if "link-patterns" in self.extras:
+ text = self._do_link_patterns(text)
+
+ text = self._encode_amps_and_angles(text)
+
+ text = self._do_italics_and_bold(text)
+
+ # Do hard breaks:
+ text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
+
+ return text
+
+ # "Sorta" because auto-links are identified as "tag" tokens.
+ _sorta_html_tokenize_re = re.compile(r"""
+ (
+ # tag
+ </?
+ (?:\w+) # tag name
+ (?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
+ \s*/?>
+ |
+ # auto-link (e.g., <http://www.activestate.com/>)
+ <\w+[^>]*>
+ |
+ <!--.*?--> # comment
+ |
+ <\?.*?\?> # processing instruction
+ )
+ """, re.X)
+
+ def _escape_special_chars(self, text):
+ # Python markdown note: the HTML tokenization here differs from
+ # that in Markdown.pl, hence the behaviour for subtle cases can
+ # differ (I believe the tokenizer here does a better job because
+ # it isn't susceptible to unmatched '<' and '>' in HTML tags).
+ # Note, however, that '>' is not allowed in an auto-link URL
+ # here.
+ escaped = []
+ is_html_markup = False
+ for token in self._sorta_html_tokenize_re.split(text):
+ if is_html_markup:
+ # Within tags/HTML-comments/auto-links, encode * and _
+ # so they don't conflict with their use in Markdown for
+ # italics and strong. We're replacing each such
+ # character with its corresponding MD5 checksum value;
+ # this is likely overkill, but it should prevent us from
+ # colliding with the escape values by accident.
+ escaped.append(token.replace('*', g_escape_table['*'])
+ .replace('_', g_escape_table['_']))
+ else:
+ escaped.append(self._encode_backslash_escapes(token))
+ is_html_markup = not is_html_markup
+ return ''.join(escaped)
+
+ def _hash_html_spans(self, text):
+ # Used for safe_mode.
+
+ def _is_auto_link(s):
+ if ':' in s and self._auto_link_re.match(s):
+ return True
+ elif '@' in s and self._auto_email_link_re.match(s):
+ return True
+ return False
+
+ tokens = []
+ is_html_markup = False
+ for token in self._sorta_html_tokenize_re.split(text):
+ if is_html_markup and not _is_auto_link(token):
+ sanitized = self._sanitize_html(token)
+ key = _hash_text(sanitized)
+ self.html_spans[key] = sanitized
+ tokens.append(key)
+ else:
+ tokens.append(token)
+ is_html_markup = not is_html_markup
+ return ''.join(tokens)
+
+ def _unhash_html_spans(self, text):
+ for key, sanitized in self.html_spans.items():
+ text = text.replace(key, sanitized)
+ return text
+
+ def _sanitize_html(self, s):
+ if self.safe_mode == "replace":
+ return self.html_removed_text
+ elif self.safe_mode == "escape":
+ replacements = [
+                ('&', '&amp;'),
+                ('<', '&lt;'),
+                ('>', '&gt;'),
+ ]
+ for before, after in replacements:
+ s = s.replace(before, after)
+ return s
+ else:
+ raise MarkdownError("invalid value for 'safe_mode': %r (must be "
+ "'escape' or 'replace')" % self.safe_mode)
+
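+    # For example (a sketch):
+    #   >>> Markdown(safe_mode="escape")._sanitize_html('<script>x</script>')
+    #   '&lt;script&gt;x&lt;/script&gt;'
+    # With safe_mode="replace", the same input becomes '[HTML_REMOVED]'.
+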
+ _tail_of_inline_link_re = re.compile(r'''
+ # Match tail of: [text](/url/) or [text](/url/ "title")
+ \( # literal paren
+ [ \t]*
+ (?P<url> # \1
+ <.*?>
+ |
+ .*?
+ )
+ [ \t]*
+ ( # \2
+ (['"]) # quote char = \3
+ (?P<title>.*?)
+ \3 # matching quote
+ )? # title is optional
+ \)
+ ''', re.X | re.S)
+ _tail_of_reference_link_re = re.compile(r'''
+ # Match tail of: [text][id]
+ [ ]? # one optional space
+ (?:\n[ ]*)? # one optional newline followed by spaces
+ \[
+ (?P<id>.*?)
+ \]
+ ''', re.X | re.S)
+
+ def _do_links(self, text):
+ """Turn Markdown link shortcuts into XHTML <a> and <img> tags.
+
+ This is a combination of Markdown.pl's _DoAnchors() and
+ _DoImages(). They are done together because that simplified the
+        approach. A different approach than Markdown.pl's was necessary
+        because Python's regex engine lacks the atomic matching support
+        that Markdown.pl's $g_nested_brackets pattern relies on.
+ """
+ MAX_LINK_TEXT_SENTINEL = 300
+
+ # `anchor_allowed_pos` is used to support img links inside
+ # anchors, but not anchors inside anchors. An anchor's start
+ # pos must be `>= anchor_allowed_pos`.
+ anchor_allowed_pos = 0
+
+ curr_pos = 0
+ while True: # Handle the next link.
+ # The next '[' is the start of:
+ # - an inline anchor: [text](url "title")
+ # - a reference anchor: [text][id]
+ # - an inline img: ![text](url "title")
+ # - a reference img: ![text][id]
+ # - a footnote ref: [^id]
+ # (Only if 'footnotes' extra enabled)
+ # - a footnote defn: [^id]: ...
+ # (Only if 'footnotes' extra enabled) These have already
+ # been stripped in _strip_footnote_definitions() so no
+ # need to watch for them.
+ # - a link definition: [id]: url "title"
+ # These have already been stripped in
+ # _strip_link_definitions() so no need to watch for them.
+ # - not markup: [...anything else...
+ try:
+ start_idx = text.index('[', curr_pos)
+ except ValueError:
+ break
+ text_length = len(text)
+
+ # Find the matching closing ']'.
+ # Markdown.pl allows *matching* brackets in link text so we
+ # will here too. Markdown.pl *doesn't* currently allow
+ # matching brackets in img alt text -- we'll differ in that
+ # regard.
+ bracket_depth = 0
+ for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
+ text_length)):
+ ch = text[p]
+ if ch == ']':
+ bracket_depth -= 1
+ if bracket_depth < 0:
+ break
+ elif ch == '[':
+ bracket_depth += 1
+ else:
+ # Closing bracket not found within sentinel length.
+ # This isn't markup.
+ curr_pos = start_idx + 1
+ continue
+ link_text = text[start_idx+1:p]
+
+ # Possibly a footnote ref?
+ if "footnotes" in self.extras and link_text.startswith("^"):
+ normed_id = re.sub(r'\W', '-', link_text[1:])
+ if normed_id in self.footnotes:
+ self.footnote_ids.append(normed_id)
+ result = '<sup class="footnote-ref" id="fnref-%s">' \
+ '<a href="#fn-%s">%s</a></sup>' \
+ % (normed_id, normed_id, len(self.footnote_ids))
+ text = text[:start_idx] + result + text[p+1:]
+ else:
+ # This id isn't defined, leave the markup alone.
+ curr_pos = p+1
+ continue
+
+ # Now determine what this is by the remainder.
+ p += 1
+ if p == text_length:
+ return text
+
+ # Inline anchor or img?
+ if text[p] == '(': # attempt at perf improvement
+ match = self._tail_of_inline_link_re.match(text, p)
+ if match:
+ # Handle an inline anchor or img.
+ is_img = start_idx > 0 and text[start_idx-1] == "!"
+ if is_img:
+ start_idx -= 1
+
+ url, title = match.group("url"), match.group("title")
+ if url and url[0] == '<':
+ url = url[1:-1] # '<url>' -> 'url'
+ # We've got to encode these to avoid conflicting
+ # with italics/bold.
+ url = url.replace('*', g_escape_table['*']) \
+ .replace('_', g_escape_table['_'])
+ if title:
+ title_str = ' title="%s"' \
+ % title.replace('*', g_escape_table['*']) \
+ .replace('_', g_escape_table['_']) \
+                                   .replace('"', '&quot;')
+ else:
+ title_str = ''
+ if is_img:
+ result = '<img src="%s" alt="%s"%s%s' \
+                            % (url, link_text.replace('"', '&quot;'),
+ title_str, self.empty_element_suffix)
+ curr_pos = start_idx + len(result)
+ text = text[:start_idx] + result + text[match.end():]
+ elif start_idx >= anchor_allowed_pos:
+ result_head = '<a href="%s"%s>' % (url, title_str)
+ result = '%s%s</a>' % (result_head, link_text)
+ # <img> allowed from curr_pos on, <a> from
+ # anchor_allowed_pos on.
+ curr_pos = start_idx + len(result_head)
+ anchor_allowed_pos = start_idx + len(result)
+ text = text[:start_idx] + result + text[match.end():]
+ else:
+ # Anchor not allowed here.
+ curr_pos = start_idx + 1
+ continue
+
+ # Reference anchor or img?
+ else:
+ match = self._tail_of_reference_link_re.match(text, p)
+ if match:
+ # Handle a reference-style anchor or img.
+ is_img = start_idx > 0 and text[start_idx-1] == "!"
+ if is_img:
+ start_idx -= 1
+ link_id = match.group("id").lower()
+ if not link_id:
+ link_id = link_text.lower() # for links like [this][]
+ if link_id in self.urls:
+ url = self.urls[link_id]
+ # We've got to encode these to avoid conflicting
+ # with italics/bold.
+ url = url.replace('*', g_escape_table['*']) \
+ .replace('_', g_escape_table['_'])
+ title = self.titles.get(link_id)
+ if title:
+ title = title.replace('*', g_escape_table['*']) \
+ .replace('_', g_escape_table['_'])
+ title_str = ' title="%s"' % title
+ else:
+ title_str = ''
+ if is_img:
+ result = '<img src="%s" alt="%s"%s%s' \
+                            % (url, link_text.replace('"', '&quot;'),
+ title_str, self.empty_element_suffix)
+ curr_pos = start_idx + len(result)
+ text = text[:start_idx] + result + text[match.end():]
+ elif start_idx >= anchor_allowed_pos:
+ result_head = '<a href="%s"%s>' % (url, title_str)
+ result = '%s%s</a>' % (result_head, link_text)
+ # <img> allowed from curr_pos on, <a> from
+ # anchor_allowed_pos on.
+ curr_pos = start_idx + len(result_head)
+ anchor_allowed_pos = start_idx + len(result)
+ text = text[:start_idx] + result + text[match.end():]
+ else:
+ # Anchor not allowed here.
+ curr_pos = start_idx + 1
+ else:
+ # This id isn't defined, leave the markup alone.
+ curr_pos = match.end()
+ continue
+
+ # Otherwise, it isn't markup.
+ curr_pos = start_idx + 1
+
+ return text
+
+
+ _setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M)
+ def _setext_h_sub(self, match):
+ n = {"=": 1, "-": 2}[match.group(2)[0]]
+ demote_headers = self.extras.get("demote-headers")
+ if demote_headers:
+ n = min(n + demote_headers, 6)
+ return "<h%d>%s</h%d>\n\n" \
+ % (n, self._run_span_gamut(match.group(1)), n)
+
+ _atx_h_re = re.compile(r'''
+ ^(\#{1,6}) # \1 = string of #'s
+ [ \t]*
+ (.+?) # \2 = Header text
+ [ \t]*
+ (?<!\\) # ensure not an escaped trailing '#'
+ \#* # optional closing #'s (not counted)
+ \n+
+ ''', re.X | re.M)
+ def _atx_h_sub(self, match):
+ n = len(match.group(1))
+ demote_headers = self.extras.get("demote-headers")
+ if demote_headers:
+ n = min(n + demote_headers, 6)
+ return "<h%d>%s</h%d>\n\n" \
+ % (n, self._run_span_gamut(match.group(2)), n)
+
+ def _do_headers(self, text):
+ # Setext-style headers:
+ # Header 1
+ # ========
+ #
+ # Header 2
+ # --------
+ text = self._setext_h_re.sub(self._setext_h_sub, text)
+
+ # atx-style headers:
+ # # Header 1
+ # ## Header 2
+ # ## Header 2 with closing hashes ##
+ # ...
+ # ###### Header 6
+ text = self._atx_h_re.sub(self._atx_h_sub, text)
+
+ return text
+
+
+ _marker_ul_chars = '*+-'
+ _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
+ _marker_ul = '(?:[%s])' % _marker_ul_chars
+ _marker_ol = r'(?:\d+\.)'
+
+ def _list_sub(self, match):
+ lst = match.group(1)
+ lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
+ result = self._process_list_items(lst)
+ if self.list_level:
+ return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
+ else:
+ return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
+
+ def _do_lists(self, text):
+ # Form HTML ordered (numbered) and unordered (bulleted) lists.
+
+ for marker_pat in (self._marker_ul, self._marker_ol):
+ # Re-usable pattern to match any entire ul or ol list:
+ less_than_tab = self.tab_width - 1
+ whole_list = r'''
+ ( # \1 = whole list
+ ( # \2
+ [ ]{0,%d}
+ (%s) # \3 = first list item marker
+ [ \t]+
+ )
+ (?:.+?)
+ ( # \4
+ \Z
+ |
+ \n{2,}
+ (?=\S)
+ (?! # Negative lookahead for another list item marker
+ [ \t]*
+ %s[ \t]+
+ )
+ )
+ )
+ ''' % (less_than_tab, marker_pat, marker_pat)
+
+ # We use a different prefix before nested lists than top-level lists.
+ # See extended comment in _process_list_items().
+ #
+ # Note: There's a bit of duplication here. My original implementation
+ # created a scalar regex pattern as the conditional result of the test on
+ # $g_list_level, and then only ran the $text =~ s{...}{...}egmx
+ # substitution once, using the scalar as the pattern. This worked,
+ # everywhere except when running under MT on my hosting account at Pair
+ # Networks. There, this caused all rebuilds to be killed by the reaper (or
+ # perhaps they crashed, but that seems incredibly unlikely given that the
+            # same script on the same server ran fine *except* under MT). I've spent
+            # more time trying to figure out why this is happening than I'd like to
+            # admit. My only guess, backed up by the fact that this workaround works,
+            # is that Perl optimizes the substitution when it can figure out that the
+            # pattern will never change, and when this optimization isn't on, we run
+            # afoul of the reaper. Thus, the slightly redundant code that uses two
+            # static s/// patterns rather than one conditional pattern.
+
+ if self.list_level:
+ sub_list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
+ text = sub_list_re.sub(self._list_sub, text)
+ else:
+ list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
+ re.X | re.M | re.S)
+ text = list_re.sub(self._list_sub, text)
+
+ return text
+
+ _list_item_re = re.compile(r'''
+ (\n)? # leading line = \1
+ (^[ \t]*) # leading whitespace = \2
+ (%s) [ \t]+ # list marker = \3
+ ((?:.+?) # list item text = \4
+ (\n{1,2})) # eols = \5
+ (?= \n* (\Z | \2 (%s) [ \t]+))
+ ''' % (_marker_any, _marker_any),
+ re.M | re.X | re.S)
+
+ _last_li_endswith_two_eols = False
+ def _list_item_sub(self, match):
+ item = match.group(4)
+ leading_line = match.group(1)
+ leading_space = match.group(2)
+ if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
+ item = self._run_block_gamut(self._outdent(item))
+ else:
+ # Recursion for sub-lists:
+ item = self._do_lists(self._outdent(item))
+ if item.endswith('\n'):
+ item = item[:-1]
+ item = self._run_span_gamut(item)
+ self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
+ return "<li>%s</li>\n" % item
+
+ def _process_list_items(self, list_str):
+ # Process the contents of a single ordered or unordered list,
+ # splitting it into individual list items.
+
+ # The $g_list_level global keeps track of when we're inside a list.
+ # Each time we enter a list, we increment it; when we leave a list,
+ # we decrement. If it's zero, we're not in a list anymore.
+ #
+ # We do this because when we're not inside a list, we want to treat
+ # something like this:
+ #
+ # I recommend upgrading to version
+ # 8. Oops, now this line is treated
+ # as a sub-list.
+ #
+ # As a single paragraph, despite the fact that the second line starts
+ # with a digit-period-space sequence.
+ #
+ # Whereas when we're inside a list (or sub-list), that line will be
+ # treated as the start of a sub-list. What a kludge, huh? This is
+ # an aspect of Markdown's syntax that's hard to parse perfectly
+ # without resorting to mind-reading. Perhaps the solution is to
+ # change the syntax rules such that sub-lists must start with a
+ # starting cardinal number; e.g. "1." or "a.".
+ self.list_level += 1
+ self._last_li_endswith_two_eols = False
+ list_str = list_str.rstrip('\n') + '\n'
+ list_str = self._list_item_re.sub(self._list_item_sub, list_str)
+ self.list_level -= 1
+ return list_str
+
+ def _get_pygments_lexer(self, lexer_name):
+ try:
+ from pygments import lexers, util
+ except ImportError:
+ return None
+ try:
+ return lexers.get_lexer_by_name(lexer_name)
+ except util.ClassNotFound:
+ return None
+
+ def _color_with_pygments(self, codeblock, lexer):
+ import pygments
+ import pygments.formatters
+
+ class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
+ def _wrap_code(self, inner):
+ """A function for use in a Pygments Formatter which
+ wraps in <code> tags.
+ """
+ yield 0, "<code>"
+ for tup in inner:
+ yield tup
+ yield 0, "</code>"
+
+ def wrap(self, source, outfile):
+ """Return the source with a code, pre, and div."""
+ return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
+
+ formatter = HtmlCodeFormatter(cssclass="codehilite")
+ return pygments.highlight(codeblock, lexer, formatter)
+
+ def _code_block_sub(self, match):
+ codeblock = match.group(1)
+ codeblock = self._outdent(codeblock)
+ codeblock = self._detab(codeblock)
+ codeblock = codeblock.lstrip('\n') # trim leading newlines
+ codeblock = codeblock.rstrip() # trim trailing whitespace
+
+ if "code-color" in self.extras and codeblock.startswith(":::"):
+ lexer_name, rest = codeblock.split('\n', 1)
+ lexer_name = lexer_name[3:].strip()
+ lexer = self._get_pygments_lexer(lexer_name)
+ codeblock = rest.lstrip("\n") # Remove lexer declaration line.
+ if lexer:
+ colored = self._color_with_pygments(codeblock, lexer)
+ return "\n\n%s\n\n" % colored
+
+ codeblock = self._encode_code(codeblock)
+ return "\n\n<pre><code>%s\n</code></pre>\n\n" % codeblock
+
+ def _do_code_blocks(self, text):
+ """Process Markdown `<pre><code>` blocks."""
+ code_block_re = re.compile(r'''
+ (?:\n\n|\A)
+ ( # $1 = the code block -- one or more lines, starting with a space/tab
+ (?:
+ (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
+ .*\n+
+ )+
+ )
+ ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
+ ''' % (self.tab_width, self.tab_width),
+ re.M | re.X)
+
+ return code_block_re.sub(self._code_block_sub, text)
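+
+    # For example (a sketch), with tab_width=4 the two lines
+    #       :::python
+    #       print "hi"
+    # (each indented four spaces) form one code block. With the
+    # 'code-color' extra on and pygments installed, the ':::python' line
+    # selects the lexer; otherwise the block is emitted as a plain
+    # <pre><code>...</code></pre>.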
+
+
+ # Rules for a code span:
+ # - backslash escapes are not interpreted in a code span
+    # - to include a backtick or a run of backticks, the delimiters must
+    #   be a longer run of backticks
+ # - cannot start or end a code span with a backtick; pad with a
+ # space and that space will be removed in the emitted HTML
+ # See `test/tm-cases/escapes.text` for a number of edge-case
+ # examples.
+ _code_span_re = re.compile(r'''
+ (?<!\\)
+ (`+) # \1 = Opening run of `
+ (?!`) # See Note A test/tm-cases/escapes.text
+ (.+?) # \2 = The code block
+ (?<!`)
+ \1 # Matching closer
+ (?!`)
+ ''', re.X | re.S)
+
+ def _code_span_sub(self, match):
+ c = match.group(2).strip(" \t")
+ c = self._encode_code(c)
+ return "<code>%s</code>" % c
+
+ def _do_code_spans(self, text):
+ # * Backtick quotes are used for <code></code> spans.
+ #
+ # * You can use multiple backticks as the delimiters if you want to
+ # include literal backticks in the code span. So, this input:
+ #
+ # Just type ``foo `bar` baz`` at the prompt.
+ #
+ # Will translate to:
+ #
+ # <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
+ #
+ # There's no arbitrary limit to the number of backticks you
+    #     can use as delimiters. If you need three consecutive backticks
+ # in your code, use four for delimiters, etc.
+ #
+ # * You can use spaces to get literal backticks at the edges:
+ #
+ # ... type `` `bar` `` ...
+ #
+ # Turns to:
+ #
+ # ... type <code>`bar`</code> ...
+ return self._code_span_re.sub(self._code_span_sub, text)
+
+ def _encode_code(self, text):
+ """Encode/escape certain characters inside Markdown code runs.
+ The point is that in code, these characters are literals,
+ and lose their special Markdown meanings.
+ """
+ replacements = [
+ # Encode all ampersands; HTML entities are not
+ # entities within a Markdown code span.
+            ('&', '&amp;'),
+            # Do the angle bracket song and dance:
+            ('<', '&lt;'),
+            ('>', '&gt;'),
+ # Now, escape characters that are magic in Markdown:
+ ('*', g_escape_table['*']),
+ ('_', g_escape_table['_']),
+ ('{', g_escape_table['{']),
+ ('}', g_escape_table['}']),
+ ('[', g_escape_table['[']),
+ (']', g_escape_table[']']),
+ ('\\', g_escape_table['\\']),
+ ]
+ for before, after in replacements:
+ text = text.replace(before, after)
+ return text
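+
+    # For example (a sketch): _encode_code('a & b < c') returns
+    # 'a &amp; b &lt; c', while any '*' or '_' becomes its g_escape_table
+    # hash, to be swapped back later by _unescape_special_chars().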
+
+ _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
+ _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
+ _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
+ _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
+ def _do_italics_and_bold(self, text):
+ # <strong> must go first:
+ if "code-friendly" in self.extras:
+ text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
+ text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
+ else:
+ text = self._strong_re.sub(r"<strong>\2</strong>", text)
+ text = self._em_re.sub(r"<em>\2</em>", text)
+ return text
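+
+    # For example (a sketch):
+    #   '**bold** and *em*' -> '<strong>bold</strong> and <em>em</em>'
+    # With the 'code-friendly' extra, '_' is left alone, so an identifier
+    # like some_var_name is not turned into some<em>var</em>name.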
+
+
+ _block_quote_re = re.compile(r'''
+ ( # Wrap whole match in \1
+ (
+ ^[ \t]*>[ \t]? # '>' at the start of a line
+ .+\n # rest of the first line
+ (.+\n)* # subsequent consecutive lines
+ \n* # blanks
+ )+
+ )
+ ''', re.M | re.X)
+    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
+
+ _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
+ def _dedent_two_spaces_sub(self, match):
+ return re.sub(r'(?m)^ ', '', match.group(1))
+
+ def _block_quote_sub(self, match):
+ bq = match.group(1)
+ bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
+ bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
+ bq = self._run_block_gamut(bq) # recurse
+
+ bq = re.sub('(?m)^', ' ', bq)
+ # These leading spaces screw with <pre> content, so we need to fix that:
+ bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
+
+ return "<blockquote>\n%s\n</blockquote>\n\n" % bq
+
+ def _do_block_quotes(self, text):
+ if '>' not in text:
+ return text
+ return self._block_quote_re.sub(self._block_quote_sub, text)
+
+ def _form_paragraphs(self, text):
+ # Strip leading and trailing lines:
+ text = text.strip('\n')
+
+ # Wrap <p> tags.
+ grafs = re.split(r"\n{2,}", text)
+ for i, graf in enumerate(grafs):
+ if graf in self.html_blocks:
+ # Unhashify HTML blocks
+ grafs[i] = self.html_blocks[graf]
+ else:
+ # Wrap <p> tags.
+ graf = self._run_span_gamut(graf)
+ grafs[i] = "<p>" + graf.lstrip(" \t") + "</p>"
+
+ return "\n\n".join(grafs)
+
+ def _add_footnotes(self, text):
+ if self.footnotes:
+ footer = [
+ '<div class="footnotes">',
+ '<hr' + self.empty_element_suffix,
+ '<ol>',
+ ]
+ for i, id in enumerate(self.footnote_ids):
+ if i != 0:
+ footer.append('')
+ footer.append('<li id="fn-%s">' % id)
+ footer.append(self._run_block_gamut(self.footnotes[id]))
+ backlink = ('<a href="#fnref-%s" '
+ 'class="footnoteBackLink" '
+ 'title="Jump back to footnote %d in the text.">'
+                            '&#8617;</a>' % (id, i+1))
+ if footer[-1].endswith("</p>"):
+ footer[-1] = footer[-1][:-len("</p>")] \
+ + ' ' + backlink + "</p>"
+ else:
+ footer.append("\n<p>%s</p>" % backlink)
+ footer.append('</li>')
+ footer.append('</ol>')
+ footer.append('</div>')
+ return text + '\n\n' + '\n'.join(footer)
+ else:
+ return text
+
+ # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
+ # http://bumppo.net/projects/amputator/
+ _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
+ _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
+
+ def _encode_amps_and_angles(self, text):
+ # Smart processing for ampersands and angle brackets that need
+ # to be encoded.
+        text = self._ampersand_re.sub('&amp;', text)
+
+        # Encode naked <'s
+        text = self._naked_lt_re.sub('&lt;', text)
+ return text
+
+ def _encode_backslash_escapes(self, text):
+ for ch, escape in g_escape_table.items():
+ text = text.replace("\\"+ch, escape)
+ return text
+
+ _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
+ def _auto_link_sub(self, match):
+ g1 = match.group(1)
+ return '<a href="%s">%s</a>' % (g1, g1)
+
+ _auto_email_link_re = re.compile(r"""
+ <
+ (?:mailto:)?
+ (
+ [-.\w]+
+ \@
+ [-\w]+(\.[-\w]+)*\.[a-zA-Z]+
+ )
+ >
+ """, re.I | re.X | re.U)
+ def _auto_email_link_sub(self, match):
+ return self._encode_email_address(
+ self._unescape_special_chars(match.group(1)))
+
+ def _do_auto_links(self, text):
+ text = self._auto_link_re.sub(self._auto_link_sub, text)
+ text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
+ return text
+
+ def _encode_email_address(self, addr):
+ # Input: an email address, e.g. "foo@example.com"
+ #
+ # Output: the email address as a mailto link, with each character
+ # of the address encoded as either a decimal or hex entity, in
+ # the hopes of foiling most address harvesting spam bots. E.g.:
+ #
+        #   <a href="&#109;&#97;&#105;&#108;&#116;&#111;:&#102;&#111;&#111;&#64;...">
+        #     &#102;&#111;&#111;&#64;...</a>
+        # (decimal entities shown here for "mailto:foo@..."; the actual
+        # output mixes raw, hex, and decimal forms at random)
+ #
+ # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
+ # mailing list: <http://tinyurl.com/yu7ue>
+ chars = [_xml_encode_email_char_at_random(ch)
+ for ch in "mailto:" + addr]
+ # Strip the mailto: from the visible part.
+ addr = '<a href="%s">%s</a>' \
+ % (''.join(chars), ''.join(chars[7:]))
+ return addr
+
+ def _do_link_patterns(self, text):
+ """Caveat emptor: there isn't much guarding against link
+ patterns being formed inside other standard Markdown links, e.g.
+ inside a [link def][like this].
+
+ Dev Notes: *Could* consider prefixing regexes with a negative
+ lookbehind assertion to attempt to guard against this.
+ """
+ link_from_hash = {}
+ for regex, href in self.link_patterns:
+ replacements = []
+ for match in regex.finditer(text):
+ replacements.append((match.span(), match.expand(href)))
+ for (start, end), href in reversed(replacements):
+ escaped_href = (
+ href.replace('"', '"') # b/c of attr quote
+ # To avoid markdown <em> and <strong>:
+ .replace('*', g_escape_table['*'])
+ .replace('_', g_escape_table['_']))
+ link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
+ hash = md5(link).hexdigest()
+ link_from_hash[hash] = link
+ text = text[:start] + hash + text[end:]
+ for hash, link in link_from_hash.items():
+ text = text.replace(hash, link)
+ return text
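+
+    # For example (an illustrative sketch -- this pattern and URL are
+    # made up, not part of the module):
+    #   link_patterns = [
+    #       (re.compile(r'RECIPE-(\d+)'), r'http://example.com/recipe/\1'),
+    #   ]
+    # would turn 'see RECIPE-123' into
+    #   see <a href="http://example.com/recipe/123">RECIPE-123</a>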
+
+ def _unescape_special_chars(self, text):
+ # Swap back in all the special characters we've hidden.
+ for ch, hash in g_escape_table.items():
+ text = text.replace(hash, ch)
+ return text
+
+ def _outdent(self, text):
+ # Remove one level of line-leading tabs or spaces
+ return self._outdent_re.sub('', text)
+
+
+class MarkdownWithExtras(Markdown):
+ """A markdowner class that enables most extras:
+
+ - footnotes
+ - code-color (only has effect if 'pygments' Python module on path)
+
+ These are not included:
+ - pyshell (specific to Python-related documenting)
+ - code-friendly (because it *disables* part of the syntax)
+ - link-patterns (because you need to specify some actual
+ link-patterns anyway)
+ """
+ extras = ["footnotes", "code-color"]
+
+
+#---- internal support functions
+
+# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
+def _curry(*args, **kwargs):
+ function, args = args[0], args[1:]
+ def result(*rest, **kwrest):
+ combined = kwargs.copy()
+ combined.update(kwrest)
+ return function(*args + rest, **combined)
+ return result
+
+# Recipe: regex_from_encoded_pattern (1.0)
+def _regex_from_encoded_pattern(s):
+ """'foo' -> re.compile(re.escape('foo'))
+ '/foo/' -> re.compile('foo')
+ '/foo/i' -> re.compile('foo', re.I)
+ """
+ if s.startswith('/') and s.rfind('/') != 0:
+ # Parse it: /PATTERN/FLAGS
+ idx = s.rfind('/')
+ pattern, flags_str = s[1:idx], s[idx+1:]
+ flag_from_char = {
+ "i": re.IGNORECASE,
+ "l": re.LOCALE,
+ "s": re.DOTALL,
+ "m": re.MULTILINE,
+ "u": re.UNICODE,
+ }
+ flags = 0
+ for char in flags_str:
+ try:
+ flags |= flag_from_char[char]
+ except KeyError:
+ raise ValueError("unsupported regex flag: '%s' in '%s' "
+ "(must be one of '%s')"
+ % (char, s, ''.join(flag_from_char.keys())))
+        return re.compile(pattern, flags)
+ else: # not an encoded regex
+ return re.compile(re.escape(s))
+
+# Recipe: dedent (0.1.2)
+def _dedentlines(lines, tabsize=8, skip_first_line=False):
+ """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
+
+ "lines" is a list of lines to dedent.
+ "tabsize" is the tab width to use for indent width calculations.
+ "skip_first_line" is a boolean indicating if the first line should
+ be skipped for calculating the indent width and for dedenting.
+ This is sometimes useful for docstrings and similar.
+
+ Same as dedent() except operates on a sequence of lines. Note: the
+ lines list is modified **in-place**.
+ """
+ DEBUG = False
+ if DEBUG:
+ print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
+ % (tabsize, skip_first_line)
+ indents = []
+ margin = None
+ for i, line in enumerate(lines):
+ if i == 0 and skip_first_line: continue
+ indent = 0
+ for ch in line:
+ if ch == ' ':
+ indent += 1
+ elif ch == '\t':
+ indent += tabsize - (indent % tabsize)
+ elif ch in '\r\n':
+ continue # skip all-whitespace lines
+ else:
+ break
+ else:
+ continue # skip all-whitespace lines
+ if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
+ if margin is None:
+ margin = indent
+ else:
+ margin = min(margin, indent)
+ if DEBUG: print "dedent: margin=%r" % margin
+
+ if margin is not None and margin > 0:
+ for i, line in enumerate(lines):
+ if i == 0 and skip_first_line: continue
+ removed = 0
+ for j, ch in enumerate(line):
+ if ch == ' ':
+ removed += 1
+ elif ch == '\t':
+ removed += tabsize - (removed % tabsize)
+ elif ch in '\r\n':
+ if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
+ lines[i] = lines[i][j:]
+ break
+ else:
+ raise ValueError("unexpected non-whitespace char %r in "
+ "line %r while removing %d-space margin"
+ % (ch, line, margin))
+ if DEBUG:
+ print "dedent: %r: %r -> removed %d/%d"\
+ % (line, ch, removed, margin)
+ if removed == margin:
+ lines[i] = lines[i][j+1:]
+ break
+ elif removed > margin:
+ lines[i] = ' '*(removed-margin) + lines[i][j+1:]
+ break
+ else:
+ if removed:
+ lines[i] = lines[i][removed:]
+ return lines
+
+def _dedent(text, tabsize=8, skip_first_line=False):
+ """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
+
+ "text" is the text to dedent.
+ "tabsize" is the tab width to use for indent width calculations.
+ "skip_first_line" is a boolean indicating if the first line should
+ be skipped for calculating the indent width and for dedenting.
+ This is sometimes useful for docstrings and similar.
+
+ textwrap.dedent(s), but don't expand tabs to spaces
+ """
+ lines = text.splitlines(1)
+ _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
+ return ''.join(lines)
+
+
+class _memoized(object):
+ """Decorator that caches a function's return value each time it is called.
+ If called later with the same arguments, the cached value is returned, and
+ not re-evaluated.
+
+ http://wiki.python.org/moin/PythonDecoratorLibrary
+ """
+ def __init__(self, func):
+ self.func = func
+ self.cache = {}
+ def __call__(self, *args):
+ try:
+ return self.cache[args]
+ except KeyError:
+ self.cache[args] = value = self.func(*args)
+ return value
+ except TypeError:
+ # uncachable -- for instance, passing a list as an argument.
+ # Better to not cache than to blow up entirely.
+ return self.func(*args)
+ def __repr__(self):
+ """Return the function's docstring."""
+ return self.func.__doc__
+
+
+def _hr_tag_re_from_tab_width(tab_width):
+ return re.compile(r"""
+ (?:
+ (?<=\n\n) # Starting after a blank line
+ | # or
+ \A\n? # the beginning of the doc
+ )
+ ( # save in \1
+ [ ]{0,%d}
+ <(hr) # start tag = \2
+ \b # word break
+ ([^<>])*? #
+ /?> # the matching end tag
+ [ \t]*
+ (?=\n{2,}|\Z) # followed by a blank line or end of document
+ )
+ """ % (tab_width - 1), re.X)
+_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
+
+
+def _xml_encode_email_char_at_random(ch):
+ r = random()
+ # Roughly 10% raw, 45% hex, 45% dec.
+ # '@' *must* be encoded. I [John Gruber] insist.
+ if r > 0.9 and ch != "@":
+ return ch
+ elif r < 0.45:
+ # The [1:] is to drop leading '0': 0x63 -> x63
+ return '&#%s;' % hex(ord(ch))[1:]
+ else:
+ return '&#%s;' % ord(ch)
+
+def _hash_text(text):
+ return 'md5:'+md5(text.encode("utf-8")).hexdigest()
+
+
+#---- mainline
+
+class _NoReflowFormatter(optparse.IndentedHelpFormatter):
+ """An optparse formatter that does NOT reflow the description."""
+ def format_description(self, description):
+ return description or ""
+
+def _test():
+ import doctest
+ doctest.testmod()
+
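+# Example command line (a sketch; the file names are illustrative):
+#   python markdown2.py -x footnotes,code-color doc.text > doc.html
+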
+def main(argv=sys.argv):
+ if not logging.root.handlers:
+ logging.basicConfig()
+
+ usage = "usage: %prog [PATHS...]"
+ version = "%prog "+__version__
+ parser = optparse.OptionParser(prog="markdown2", usage=usage,
+ version=version, description=cmdln_desc,
+ formatter=_NoReflowFormatter())
+ parser.add_option("-v", "--verbose", dest="log_level",
+ action="store_const", const=logging.DEBUG,
+ help="more verbose output")
+ parser.add_option("--encoding",
+ help="specify encoding of text content")
+ parser.add_option("--html4tags", action="store_true", default=False,
+ help="use HTML 4 style for empty element tags")
+ parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
+ help="sanitize literal HTML: 'escape' escapes "
+ "HTML meta chars, 'replace' replaces with an "
+ "[HTML_REMOVED] note")
+    parser.add_option("-x", "--extras", action="append",
+                      help="Turn on specific extra features (not part of "
+                      "the core Markdown spec). Supported values: "
+                      "'code-friendly' disables _/__ for emphasis; "
+                      "'code-color' adds code-block syntax coloring; "
+                      "'link-patterns' adds auto-linking based on patterns; "
+                      "'footnotes' adds the footnotes syntax; "
+                      "'pyshell' puts unindented Python interactive "
+                      "shell sessions in <code> blocks.")
+    parser.add_option("--use-file-vars", action="store_true",
+ help="Look for and use Emacs-style 'markdown-extras' "
+ "file var to turn on extras. See "
+ "<http://code.google.com/p/python-markdown2/wiki/Extras>.")
+ parser.add_option("--link-patterns-file",
+ help="path to a link pattern file")
+ parser.add_option("--self-test", action="store_true",
+ help="run internal self-tests (some doctests)")
+ parser.add_option("--compare", action="store_true",
+ help="run against Markdown.pl as well (for testing)")
+ parser.set_defaults(log_level=logging.INFO, compare=False,
+ encoding="utf-8", safe_mode=None, use_file_vars=False)
+    opts, paths = parser.parse_args(argv[1:])
+ log.setLevel(opts.log_level)
+
+ if opts.self_test:
+ return _test()
+
+ if opts.extras:
+ extras = {}
+ for s in opts.extras:
+ splitter = re.compile("[,;: ]+")
+ for e in splitter.split(s):
+ if '=' in e:
+ ename, earg = e.split('=', 1)
+ try:
+ earg = int(earg)
+ except ValueError:
+ pass
+ else:
+ ename, earg = e, None
+ extras[ename] = earg
+ else:
+ extras = None
+
+ if opts.link_patterns_file:
+ link_patterns = []
+ f = open(opts.link_patterns_file)
+ try:
+ for i, line in enumerate(f.readlines()):
+ if not line.strip(): continue
+ if line.lstrip().startswith("#"): continue
+ try:
+ pat, href = line.rstrip().rsplit(None, 1)
+ except ValueError:
+ raise MarkdownError("%s:%d: invalid link pattern line: %r"
+ % (opts.link_patterns_file, i+1, line))
+ link_patterns.append(
+ (_regex_from_encoded_pattern(pat), href))
+ finally:
+ f.close()
+ else:
+ link_patterns = None
+
+ from os.path import join, dirname, abspath
+ markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
+ "Markdown.pl")
+ for path in paths:
+ if opts.compare:
+ print "==== Markdown.pl ===="
+ perl_cmd = 'perl %s "%s"' % (markdown_pl, path)
+ o = os.popen(perl_cmd)
+ perl_html = o.read()
+ o.close()
+ sys.stdout.write(perl_html)
+ print "==== markdown2.py ===="
+ html = markdown_path(path, encoding=opts.encoding,
+ html4tags=opts.html4tags,
+ safe_mode=opts.safe_mode,
+ extras=extras, link_patterns=link_patterns,
+ use_file_vars=opts.use_file_vars)
+ sys.stdout.write(
+ html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+ if opts.compare:
+ print "==== match? %r ====" % (perl_html == html)
+
+
+if __name__ == "__main__":
+ sys.exit( main(sys.argv) )
+
diff --git a/lib/mdx_typography.py b/lib/mdx_typography.py
@@ -1,166 +0,0 @@
-
-# vim:encoding=utf-8
-
-# from Joachim Schipper <joachim@joachimschipper.nl>
-# XXX license?
-
-"""
-This is a port of a small subset of John Gruber's SmartyPants
-(http://daringfireball.net/projects/smartypants/). It is compatible in the
-sense that it does not do anything that SmartyPants would not; however,
-SmartyPants tries to be considerably more intelligent, at the price of added
-complexity and (very rarely!) getting it wrong.
-
-This extension for Markdown performs the following translations of character
-sequences into entities (controlled by config['rules']):
-- ', '' and " into ``curly'' HTML quotes
-- -- and --- into en- and em-dashes
-- ... into an ellipsis
-
-It also marks all-caps words with <span class=caps>. This includes THIS,
-T.H.I.S. and even T0H0I0S0, but not T H I S. Set config['caps_re'] to None to
-disable, or to any regular expression to change what is matched. Any text
-'below' a <code> or <pre> tag is left alone (see config['caps_illegal_tags']).
-
-Finally, it prevents
-widows.
-
-Set config['no_widows'] to False to suppress this last behaviour.
-
-Each of these may be prevented by \\-escaping the relevant character(s) (but
-note that the second ' in \\'' will still be treated, etc.)
-"""
-# XXX Integrate with mdx_parser.py?
-
-import markdown
-import re
-
-class TypographyPattern(markdown.Pattern):
- def __init__(self, pattern, replace):
- # Note: markdown.Pattern uses greedy matching for the first part
- self.compiled_re = re.compile("^(.*?)%s(.*)$" % pattern, re.DOTALL)
- self.replace = tuple([(re.compile(p[0]), p[1]) for p in replace])
-
- def getCompiledRegExp(self):
- return self.compiled_re
-
- def handleMatch(self, m, doc):
- matched = m.group(2)
-
- for p, r in self.replace:
- if p.match(matched):
- return r
-
- assert(('Configuration error: %s, matched by %s, must be matched by any of %s' % (matched, self.compiled_re.pattern, [p[0].pattern for p in self.replace])) == True)
-
-class CapsPostprocessor(markdown.Postprocessor):
- def __init__(self, config):
- self.config = config
-
- def run(self, doc):
- elts = doc.find(lambda elt: elt.type == 'text' and self.config['caps_re'].search(elt.value))
-
- while elts:
- t = elts.pop()
-
- m = self.config['caps_re'].search(t.value)
- if not m:
- continue
-
- p = t.parent
- done = False
- while p != doc.documentElement:
- if p.type == 'element' and p.nodeName in self.config['caps_illegal_tags']:
- done = True
- break
-
- p = p.parent
-
- if done:
- continue
-
- span = doc.createElement('span')
- span.setAttribute('class', 'caps')
- span.appendChild(doc.createTextNode(m.group(1)))
- rest = doc.createTextNode(t.value[m.end():])
-
- idx = t.parent.childNodes.index(t) + 1
-
- t.parent.insertChild(idx, rest)
- t.parent.insertChild(idx, span)
- t.value = t.value[:m.start()]
-
- elts.append(rest)
-
-class WidowsPostprocessor(markdown.Postprocessor):
- def __init__(self, config):
- self.config = config
-
-    def run(self, doc):
-        """Prevent widows by turning a paragraph's last piece of
-        whitespace into a non-breaking space (&nbsp;).
-        """
-
- elts = doc.find(lambda elt: elt.type == 'element' and elt.nodeName == 'p')
-
- for p in elts:
- texts = p.find(lambda elt: elt.type == 'text')
-
- while texts:
- t = texts.pop()
-
- idx = max(t.value.rfind(' '), t.value.rfind('\n'))
-
- if idx != -1:
-                    # Replace this whitespace by an &nbsp; entity reference
- rest = doc.createTextNode(t.value[:idx])
- t.value = t.value[idx + 1:]
-
- idx = t.parent.childNodes.index(t)
-
- t.parent.insertChild(idx, doc.createEntityReference('nbsp'))
- t.parent.insertChild(idx, rest)
-
- break
-
-class TypographyExtension(markdown.Extension):
- def __init__(self, config):
- self.config = config
-
- def extendMarkdown(self, md, md_globals):
- idx = md.inlinePatterns.index(markdown.STRONG_EM_PATTERN)
-
- rules = self.config['rules']
- rules.reverse()
- for (opening_regex, rules) in rules:
- md.inlinePatterns.insert(idx, TypographyPattern(opening_regex, rules))
-
- if self.config['caps_re']:
- md.postprocessors.append(CapsPostprocessor(self.config))
- if self.config['no_widows']:
- md.postprocessors.append(WidowsPostprocessor(self.config))
-
-def makeExtension(config=[]):
- if not config:
- config = {}
- if not config.has_key('rules'):
- config['rules'] = [(r'(---?|\.\.\.)', # apply rule to this
- (('---', u'—'), # (regex, entity)
- ('--', u'–'),
- ('\.\.\.', u'…'))),# end of rule
- (r"\B(\"|''?)\b",
- (("\"|''", u'“'),
- ("'", u'‘'))),
- (r"(?:\b|(?<=[,.!?]))(\"|''?)",
- (("\"|''", u'”'),
- ("'", u'’')))]
- if not config.has_key('no_widows'):
- config['no_widows'] = False
- if not config.has_key('caps_re'):
- config['caps_re'] = r'\b([0-9]*(?:[A-Z][0-9]*){2,}|(?:[A-Z]\.){2,})(?=\b|[ ,.!?-])'
- if not config.has_key('caps_illegal_tags'):
- config['caps_illegal_tags'] = ['code', 'pre']
-
- #config['caps_re'] = re.compile(config['caps_re'])
- config['caps_re'] = None
-
- return TypographyExtension(config)
diff --git a/viewutils.py b/viewutils.py
@@ -1,12 +1,13 @@
import re
-import markdown
+from markdown2 import Markdown
import genshi
def mini_markdown(s):
# XXX find a more efficient way to do this?
- m = markdown.Markdown(extensions=['typography']).convert(s)
- the_p, = re.match(u'<p>(.*)\n</p>', m).groups()
- return genshi.Markup(the_p)
+ m = Markdown(extras=['code-friendly']).convert(s)
+ match = re.match(u'<p>(.*)</p>', m)
+ assert match, m
+ return genshi.Markup(match.group(1))
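+# For example (a sketch): mini_markdown(u'*hi*') returns
+# Markup(u'<em>hi</em>'), since convert() wraps the result in one <p>.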
def tag_list(script_name, tags):
return genshi.Markup(u', ').join(