#### markup.py
import markdown,sys ,keyword,cgi,re,requests,os

# Page to render: optional single command-line argument, default "index".
Page = "index"
if len(sys.argv) == 2:
  Page = sys.argv[1]

Style   = "http://unbox.org/open/trunk/2014/markup"
Content = "http://unbox.org/open/trunk/2014/markup"

# Magic page name: re-download this tool's own source files, then quit.
if Page == "reestablish":
  for fname in ["markup.py", "index.cgi"]:  # 'fname', not 'file': avoid shadowing the py2 builtin
    r = requests.get(Content + "/" + fname)
    with open(fname, "wb") as code:
      code.write(r.content)
    # bug fix: chmod(f, 755) passed DECIMAL 755 (= mode 01363);
    # octal 0o755 gives the intended rwxr-xr-x.
    os.chmod(fname, 0o755)
  sys.exit()  # bug fix: use sys.exit(); bare exit() relies on the site module

def say(x):
  """Write x (stringified, no newline) to stdout and flush immediately."""
  stream = sys.stdout
  stream.write(str(x))
  stream.flush()

class Lexer(object):
  """Regex-driven syntax highlighter.
     Originally from http://www.gooli.org/snippets/pylexer.py
     http://www.gooli.org/blog/a-simple-lexer-in-python/.
     Suggested css entries:
      pre         { color: #666;   }
      ._class     { color: #000; font-weight: bold; }
      ._fun       { color: #000; font-weight: bold}
      ._comment   { color: #f79a32; }
      ._key       { color: Blue; font-weight: bold; }
      ._num       { color: #099; }
      ._op       { color: #000000; }
      ._str       { color: #d14; font-weight: bold; }"""
  # Token definitions per language: (cssClass, regex) pairs, tried in
  # order -- the first alternative that matches wins.
  patterns = {}
  patterns['py']= [
    ("_comment" ,r"#.*$"),
    ("_fun"      ,r"(?<=def )([A-Za-z_][A-Za-z0-9_]*)\s*(?=[(:])"),
    ("_class"    ,r"(?<=class )([A-Za-z_][A-Za-z0-9_]*)\s*(?=[(:])"),
    ("_key"      ,r"\b(%s)\b" % "|".join(keyword.kwlist)),
    ("_str"      ,r"[ru]?(\"([^\"\\]|(\\.))*\")|('([^\'\\]|(\\.))*')"),
    ("_op"      ,r"[%s]+" % re.escape("<>=*/+-~!%&()|{}[],.?:")),
    ("_ident"    ,r"[A-Za-z_][A-Za-z0-9_]*"),
    ("_num"      ,r"[0-9.]+"),
    ("_white"    ,r"\s+"),
    ("_other"    ,r".+")]
  def __init__(i,defs,numbers=True):
    """defs: list of (name, regex) pairs; numbers: prefix line numbers.
       i.n counts lines cumulatively across multiple tags()/nums() calls."""
    i.n, i.numbers = 0, numbers
    i.definitions = defs
    # bug fix: local was named 'str', shadowing the builtin
    pattern = "|".join(["(?P<%s>%s)"
                        % (x,y) for x,y in defs])
    i.regexp = re.compile(pattern, re.MULTILINE)
  def tags(i, text):
    """Return text as HTML: each token wrapped in <span class=NAME>,
       newlines converted to <br>."""
    out = []
    for ako, value in i.parse(i.nums(text)):
      # NOTE: cgi.escape is deprecated/removed in modern py3 (use html.escape);
      # kept here since this file targets python 2.
      tmp= "<span class='%s'>%s</span>" % (ako,cgi.escape(value))
      out += [re.sub("\n","<br>",tmp)]
    return ''.join(out)
  def nums(i,text):
    """Prefix each non-blank line with a running line number."""
    if not i.numbers: return text
    empty   = re.compile('^[\t ]*$')
    lines,out = text.splitlines(),[]
    for line in lines:
      i.n += 1
      out += ['' if empty.match(line)
                 else '%5s: %s' % (i.n,line)]
    return "\n".join(out)
  def parse(i, text):
    """Yield (tokenName, matchedText) pairs for every token in text."""
    for match in i.regexp.finditer(text):
      for name, rexp in i.definitions:
        m = match.group(name)
        if m is not None:
          yield (name, m)
          break  # first named group that matched wins

def xpands(txt,fields,sep="!!!"):
  """Replace every sep-delimited key in txt with fields[key]
     ('???' for unknown keys), recursing into replacement values up to
     10 levels deep. Returns the expanded text split into lines."""
  def xpand(txt,lvl=0):
    if lvl > 10:
      yield "RECURSIVE INCLUDE ERROR"
      return  # bug fix: stop here -- original fell through and kept recursing
    for i,part in enumerate(txt.split(sep)):
      if not i % 2:
        yield part          # even chunks are literal text
      else:
        one = fields.get(part,'???')
        if sep in one:      # replacement itself contains keys: recurse
          for two in xpand(one,lvl+1):
            yield two
        else:
          yield one
  # bug fix: inner xpand takes (txt, lvl) -- original passed (txt, fields, sep)
  # which raised TypeError on every call.
  return (''.join(part for part in
               xpand(txt))).splitlines()

def lines(fields,str,sep='!!!'):
  """1) Writes header data (key: value lines up to the first blank line,
        with indented continuation lines) into 'fields';
     2) expands !!!key!!! into fields[key];
     3) stores '"<name' ... '">' detour sections into fields[name];
     yields all remaining body lines."""
  detour, headerp = None, True
  header, this = {}, '0'
  header[this] = []
  for line1 in str.splitlines():
    # xpands already returns a list of lines, so iterate it directly
    # (original also called .splitlines() on the list -> AttributeError).
    for line2 in xpands(line1,fields,sep):
      if headerp:  # bug fix: was 'if header:' -- the dict is always truthy
        if not line2:  # bug fix: was undefined name 'line'
          # blank line terminates the header: flush collected values
          for k,v in header.items():
            fields[k] = ', '.join(v)
          headerp = False
        else:
          if re.search(r'^[ \t]+',line2):
            # indented continuation of the current header key
            header[this] += [line2.strip()]
          else:
            parts = line2.split(':')
            this  = parts[0].strip()
            that  = ':'.join(parts[1:]).strip()
            header[this] = [that]
      else:
        if re.search(r'^"<',line2):
          # '"<name' opens a detour: following lines accumulate in fields[name]
          # bug fix: was strip(...) function call instead of .strip() method
          detour = re.split(r'<|>',line2)[1].strip()
          fields[detour] = ""
          continue
        if re.search(r'^">',line2):
          detour = None
          continue
        if detour:
          fields[detour] += line2
        else:
          yield line2.strip('\n')

def parse(str,fields=None):
  """Split str into markdown text and triple-quote-fenced code blocks;
     code blocks are syntax-highlighted into <pre> HTML.
     Returns (fields, joined_text)."""
  # bug fix: default was a mutable dict literal, shared across calls and
  # mutated by lines(); build a fresh dict per call instead.
  if fields is None:
    fields = dict(Content=Content,
                  Style=Style,
                  Template='default')
  lex = Lexer(Lexer.patterns['py'])
  txt, code, codep = [], [], False
  def dumpCode(code):
    # Flush accumulated code lines as one highlighted <pre> block.
    if code:
      tmp = '\n'.join(code)
      txt.extend(["<pre>"]+[lex.tags(tmp)]+["</pre>"])
    return []
  for line in lines(fields,str):
    if re.search(r'^"""',line):
      codep = not codep  # fence toggles between text mode and code mode
      code = [] if codep else dumpCode(code)
    else:
      if codep: code += [line]
      else:      txt += [line]
  dumpCode(code)  # flush any unterminated trailing code block
  return fields,'\n'.join(txt)

def main(sep='!!!',xtend=(
          'footnotes', 'def_list',
          'tables','toc','meta')):
  """Fetch the page template and content, expand markup, and print
     the rendered HTML. xtend lists the markdown extensions to enable."""
  # bug fix: 'fields' was never defined here (NameError); seed the defaults
  # that parse() would otherwise supply so the template can be located.
  fields   = dict(Content=Content, Style=Style, Template='default')
  template = fields['Style'] + '/templates/' + fields['Template']
  head     = requests.get(template + '/head.html').text
  body     = requests.get(Content + "/" + Page).text
  foot     = requests.get(template + '/foot.html').text
  # bug fix: parse() returns (fields, text) -- the original passed the
  # whole tuple to markdown. Unpack it.
  fields, txt = parse(head + body + foot, fields)
  print(markdown.Markdown(extensions=list(xtend)).convert(txt))

main()
