/**
 * Tokenizer for wikitext, using PEG.js and a separate PEG grammar file
 * (pegTokenizer.pegjs.txt)
 *
 * Use along with a HTML5TreeBuilder and the DOMPostProcessor(s) for HTML
 * output.
 */
|
2011-12-12 14:03:54 +00:00
|
|
|
|
2011-12-28 17:04:16 +00:00
|
|
|
var PEG = require('pegjs'),
|
|
|
|
path = require('path'),
|
2012-01-03 18:44:31 +00:00
|
|
|
fs = require('fs'),
|
2012-01-04 08:39:45 +00:00
|
|
|
$ = require('jquery'),
|
2012-02-01 16:30:43 +00:00
|
|
|
events = require('events'),
|
|
|
|
defines = require('./mediawiki.parser.defines.js');
|
2011-12-12 14:03:54 +00:00
|
|
|
|
2011-12-28 17:04:16 +00:00
|
|
|
// Empty constructor: all shared state lives on the prototype, and the
// actual PEG tokenizer is built lazily on the first call to process().
function PegTokenizer() {
}

// Inherit from EventEmitter so consumers can subscribe to the 'chunk' and
// 'end' events emitted while tokenizing in process().
PegTokenizer.prototype = new events.EventEmitter();
// Restore the constructor property clobbered by the prototype assignment.
PegTokenizer.prototype.constructor = PegTokenizer;

// Static grammar-source slot, initialized to false.
// NOTE(review): process() assigns the grammar text to this.src on the
// instance/prototype, so this static property appears to stay false -- it
// looks vestigial; confirm before relying on it.
PegTokenizer.src = false;
|
2011-11-02 21:07:51 +00:00
|
|
|
|
2012-02-21 18:26:40 +00:00
|
|
|
/*
 * The main worker. Sets up event emission ('chunk' and 'end' events).
 * Consumers are supposed to register with PegTokenizer before calling
 * process().
 *
 * @param {String} text wikitext input; a trailing newline is appended if
 *   missing, since the grammar's productions expect newline-terminated input.
 * @returns {Object} { err: <Error|undefined> } -- while the try/catch below
 *   is commented out, err is always undefined.
 */
PegTokenizer.prototype.process = function( text ) {
	var err;
	if ( !this.tokenizer ) {
		// Construct a singleton static tokenizer on first use: PEG parser
		// construction is very expensive, so it is shared via the prototype.
		var pegSrcPath = path.join( __dirname, 'pegTokenizer.pegjs.txt' );
		this.src = fs.readFileSync( pegSrcPath, 'utf8' );
		var tokenizerSource = PEG.buildParser(this.src).toSource();

		/* We patch the generated source to assign the arguments array for the
		 * parse function to a function-scoped variable. We use this to pass
		 * in callbacks and other information, which can be used from actions
		 * run when matching a production. In particular, we pass in a
		 * callback called for a chunk of tokens in toplevelblock. Setting this
		 * callback per call to parse() keeps the tokenizer reentrant, so that it
		 * can be reused to expand templates while a main parse is ongoing.
		 * PEG tokenizer construction is very expensive, so having a single
		 * reentrant tokenizer is a big win.
		 *
		 * We could also make modules available to the tokenizer by prepending
		 * requires to the source.
		 */
		tokenizerSource = tokenizerSource.replace( 'parse: function(input, startRule) {',
					'parse: function(input, startRule) { var __parseArgs = arguments;' );
		//console.warn( tokenizerSource );
		// eval is required here: the patched source is generated code, not
		// external input. Assign to the prototype so all instances share it.
		PegTokenizer.prototype.tokenizer = eval( tokenizerSource );
		// alias the parse method
		this.tokenizer.tokenize = this.tokenizer.parse;
	}

	// Some input normalization: force a trailing newline
	if ( text.substring(text.length - 1) !== "\n" ) {
		text += "\n";
	}

	// XXX: Commented out exception handling during development to get
	// reasonable traces.
	//try {
		this.tokenizer.tokenize(text, 'start',
				// callback, invoked per chunk of tokens from toplevelblock
				this.emit.bind( this, 'chunk' ),
				// inline break test context (see inline_breaks below)
				this
				);
		this.emit('end');
	//} catch (e) {
		//err = e;
		//console.trace();
	//} finally {
		return { err: err };
	//}
};
|
2011-12-28 01:37:15 +00:00
|
|
|
|
2012-03-02 13:36:37 +00:00
|
|
|
/**
 * Tokenize image option text, starting at the 'img_options' production.
 * Returns whatever the shared tokenizer's parse produces; no chunk
 * callback is passed (null), and this instance is the break-test context.
 */
PegTokenizer.prototype.processImageOptions = function( text ) {
	var tokenizer = this.tokenizer;
	return tokenizer.tokenize( text, 'img_options', null, this );
};
|
2012-02-21 18:26:40 +00:00
|
|
|
|
2012-03-08 09:00:45 +00:00
|
|
|
/**
 * Tokenize a URL.
 *
 * Runs the tokenizer from the 'url' start production. Any parse failure is
 * treated as "not a URL" and reported as false rather than an exception.
 */
PegTokenizer.prototype.tokenizeURL = function( text ) {
	var result;
	try {
		result = this.tokenizer.tokenize( text, 'url', null, this );
	} catch ( e ) {
		// Best-effort: a parse error simply means the text is not a URL.
		result = false;
	}
	return result;
};
|
|
|
|
|
2012-02-21 18:26:40 +00:00
|
|
|
/*
 * Inline breaks, flag-enabled production which detects end positions for
 * active higher-level productions in inline and other nested productions.
 * Those inner productions are then exited, so that the outer production can
 * handle the end marker.
 *
 * @param {String} input full source being tokenized
 * @param {Number} pos current position in input
 * @param {Object} stops flag/counter state; stops.counters holds the
 *   per-production counters consulted below
 * @returns truthy when the current character ends the active production,
 *   null otherwise
 */
PegTokenizer.prototype.inline_breaks = function (input, pos, stops ) {
	var counters = stops.counters;
	switch( input[pos] ) {
		case '=':
			// End on '=' when an 'equal' stop is on the stack, or inside a
			// heading when only whitespace remains to the end of the line.
			return stops.onStack( 'equal' ) ||
				( counters.h &&
					input.substr( pos + 1, 200)
					.match(/[ \t]*[\r\n]/) !== null ) || null;
		case '|':
			// Guard input[pos + 1]: it is undefined at the very end of the
			// input, and calling .match() on undefined would throw.
			return counters.pipe ||
				counters.template ||
				( counters.table &&
					( ( input[pos + 1] !== undefined &&
						input[pos + 1].match(/[|}]/) !== null ) ||
						counters.tableCellArg
					)
				) || null;
		case '{':
			// {{!}} pipe templates..
			return (
						counters.pipe ||
						counters.template
					) && input.substr( pos, 5 ) === '{{!}}'
				|| null;
		case "!":
			return counters.table && input[pos + 1] === "!" ||
				null;
		case "}":
			return counters.template && input[pos + 1] === "}" || null;
		case ":":
			return counters.colon &&
				! counters.extlink &&
				! counters.linkdesc || null;
		case "\r":
			return counters.table &&
				input.substr(pos, 4).match(/\r\n?[!|]/) !== null ||
				null;
		case "\n":
			// Parenthesized so the table flag gates BOTH '!' and '|': with
			// the unparenthesized form (table && '!' || '|'), a '|' after a
			// newline would break even outside tables.
			return ( counters.table &&
				( input[pos + 1] === '!' ||
					input[pos + 1] === '|' ) ) ||
				null;
		case "]":
			return counters.extlink ||
				( counters.linkdesc && input[pos + 1] === ']' ) ||
				null;
		case "<":
			return counters.pre && input.substr( pos, 6 ) === '</pre>' || null;
		default:
			return null;
	}
};
|
|
|
|
|
|
|
|
// Alternate version of the above. The hash is likely faster, but the nested
// function calls seem to cancel that out.
// NOTE(review): this map has drifted from inline_breaks -- the '|' handler
// checks syntaxFlags.linkdesc where inline_breaks checks pipe/template, and
// there are no entries for '{' or a default. Confirm which version is
// canonical before switching to it.
PegTokenizer.prototype.breakMap = {
	'=': function(input, pos, syntaxFlags) {
		return syntaxFlags.equal ||
			( syntaxFlags.h &&
				input.substr( pos + 1, 200)
				.match(/[ \t]*[\r\n]/) !== null ) || null;
	},
	'|': function ( input, pos, syntaxFlags ) {
		// Guard input[pos + 1]: undefined at end of input; calling .match()
		// on it would throw.
		return syntaxFlags.template ||
			syntaxFlags.linkdesc ||
			( syntaxFlags.table &&
				( ( input[pos + 1] !== undefined &&
					input[pos + 1].match(/[|}]/) !== null ) ||
					syntaxFlags.tableCellArg
				)
			) || null;
	},
	"!": function ( input, pos, syntaxFlags ) {
		return syntaxFlags.table && input[pos + 1] === "!" ||
			null;
	},
	"}": function ( input, pos, syntaxFlags ) {
		return syntaxFlags.template && input[pos + 1] === "}" || null;
	},
	":": function ( input, pos, syntaxFlags ) {
		return syntaxFlags.colon &&
			! syntaxFlags.extlink &&
			! syntaxFlags.linkdesc || null;
	},
	"\r": function ( input, pos, syntaxFlags ) {
		return syntaxFlags.table &&
			input.substr(pos, 4).match(/\r\n?[!|]/) !== null ||
			null;
	},
	"\n": function ( input, pos, syntaxFlags ) {
		// Parenthesized so the table flag gates BOTH '!' and '|'; see the
		// matching fix in inline_breaks.
		return ( syntaxFlags.table &&
			( input[pos + 1] === '!' ||
				input[pos + 1] === '|' ) ) ||
			null;
	},
	"]": function ( input, pos, syntaxFlags ) {
		return syntaxFlags.extlink ||
			( syntaxFlags.linkdesc && input[pos + 1] === ']' ) ||
			null;
	},
	"<": function ( input, pos, syntaxFlags ) {
		return syntaxFlags.pre && input.substr( pos, 6 ) === '</pre>' || null;
	}
};
|
|
|
|
|
2012-02-21 18:26:40 +00:00
|
|
|
/**
 * Hash-dispatch variant of inline_breaks.
 *
 * Looks up the handler for the current character in breakMap. Characters
 * without a handler cannot break: return null to mirror the `default` branch
 * of inline_breaks, instead of throwing a TypeError on the undefined lookup
 * (breakMap has no entry for '{', among others).
 */
PegTokenizer.prototype.inline_breaks_hash = function (input, pos, syntaxFlags ) {
	var handler = this.breakMap[ input[pos] ];
	return handler ? handler( input, pos, syntaxFlags ) : null;
	//console.warn( 'ilbn res: ' + JSON.stringify( [ res, input.substr( pos, 4 ) ] ) );
	//return res;
};
|
|
|
|
|
2011-11-02 21:07:51 +00:00
|
|
|
|
2012-02-21 17:21:42 +00:00
|
|
|
|
2011-11-02 21:07:51 +00:00
|
|
|
// Export for CommonJS environments; strict equality avoids accidental
// coercion (typeof always yields a string, so === is the correct comparison).
if (typeof module === "object") {
	module.exports.PegTokenizer = PegTokenizer;
}
|