mirror of
https://gerrit.wikimedia.org/r/mediawiki/extensions/VisualEditor
synced 2024-11-30 00:55:00 +00:00
8368e17d6a
* All parser pipelines including tokenizer and DOM stuff are now constructed from a 'recipe' data structure in a ParserPipelineFactory. * All sub-pipelines of these can now be cached * Event registrations to a pipeline are directly forwarded to the last pipeline member to save relatively expensive event forwarding. * Some APIs for on-demand expansion / format conversion of parameters from parser functions are added: param.to('tokens/expanded', cb) param.to('text/wiki', cb) (this does not work yet) All parameters are additionally wrapped into a Param object that provides method for positional parameter naming (.named() or conversion to a dict (.dict()). * The async token transform manager is now separated from a frame object, with the frame holding arguments, an on-demand expansion method and loop checks. * Only keys of template parameters are now expanded. Parser functions or template arguments trigger an expansion on-demand. This (unsurprisingly) makes a big performance difference with typical switch-heavy template systems. * Return values from async transforms are no longer used in favor of plain callbacks. This saves the complication of having to maintain two code paths. A trick in transformTokens still avoids the construction of unneeded TokenAccumulators. * The results of template expansions are no longer buffered. * 301 parser tests are passing Known issues: * Cosmetic cleanup remains to do * Some parser functions do not support async expansions yet, and need to be modified. Change-Id: I1a7690baffbe8141cadf67270904a1b2e1df879a
142 lines
3.9 KiB
JavaScript
/* Front-end/Wrapper for a particular tree builder, in this case the
 * parser/tree builder from the node 'html5' module. Feed it tokens using
 * processToken, and it will build you a DOM tree retrievable using .document
 * or .body(). */
var events = require('events'),
|
|
HTML5 = require('./html5/index');
|
|
|
|
// Namespace object for the tree-builder wrapper. Declared with `var` so we
// don't create an implicit global (which also throws in strict mode).
var FauxHTML5 = {};
/**
 * Wrap an HTML5 parser/tree builder so it can be fed tokens via
 * processToken.
 *
 * @param {Object} env - parser environment; used here for debug output
 *   (see onChunk's env.dp call)
 */
FauxHTML5.TreeBuilder = function ( env ) {
	this.env = env;

	// Target parser that will consume the tokens we emit.
	this.parser = new HTML5.Parser();

	// Hook the parser up to this emitter so it listens for our tokens.
	this.parser.parse(this);

	// Implicitly start a new document by opening a body element.
	this.processToken(new TagTk( 'body' ));
};
// Inherit from EventEmitter.
// Use a bare object carrying EventEmitter's prototype rather than a shared
// EventEmitter *instance*: in modern Node the EventEmitter constructor puts
// `_events` on the object it initializes, so using an instance as the
// prototype would make all TreeBuilders share listener state.
FauxHTML5.TreeBuilder.prototype = Object.create( events.EventEmitter.prototype );
FauxHTML5.TreeBuilder.prototype.constructor = FauxHTML5.TreeBuilder;
/**
 * Register for (token) 'chunk' and 'end' events from a token emitter,
 * normally the TokenTransformDispatcher.
 *
 * @param {events.EventEmitter} emitter - source of 'chunk'/'end' events
 */
FauxHTML5.TreeBuilder.prototype.addListenersOn = function ( emitter ) {
	// Bind the handlers so they keep `this` when invoked by the emitter.
	var onChunk = this.onChunk.bind( this );
	var onEnd = this.onEnd.bind( this );
	emitter.addListener( 'chunk', onChunk );
	emitter.addListener( 'end', onEnd );
};
/**
 * Handle a 'chunk' event: feed every token in the chunk to the tree
 * builder, in order.
 *
 * @param {Array} tokens - tokens received from the token emitter
 */
FauxHTML5.TreeBuilder.prototype.onChunk = function ( tokens ) {
	this.env.dp( 'chunk: ' + JSON.stringify( tokens, null, 2 ) );
	tokens.forEach( function ( token ) {
		this.processToken( token );
	}, this );
};
/**
 * Handle the 'end' event: finalize the document, hand it to consumers via
 * a 'document' event, and reset the parser for reuse.
 */
FauxHTML5.TreeBuilder.prototype.onEnd = function ( ) {
	// FIXME HACK: For some reason the end token is sometimes not processed,
	// although processing it would normally fix up the body reference. Patch
	// the reference up here so consumers always see a body.
	var doc = this.parser.document;
	doc.body = doc.getElementsByTagName('body')[0];

	// Hand the finished document to consumers.
	this.emit( 'document', doc );

	// XXX: more clean up to allow reuse. For now, re-run parser setup and
	// implicitly open a fresh document.
	this.parser.setup();
	this.processToken(new TagTk( 'body' ));
};
/**
 * Convert a token attribute list ([{k: …, v: …}, …]) into the
 * {nodeName, nodeValue} objects the HTML5 tree builder expects.
 *
 * @param {Array|undefined} maybeAttribs - token attribute list, if any
 * @returns {Array} converted attributes; empty when the input is missing
 *   or not an array
 */
FauxHTML5.TreeBuilder.prototype._att = function (maybeAttribs) {
	var atts = [];
	// Use native Array.isArray instead of jQuery's $.isArray: `$` is never
	// declared or required in this module.
	if ( maybeAttribs && Array.isArray( maybeAttribs ) ) {
		for (var i = 0, length = maybeAttribs.length; i < length; i++) {
			var att = maybeAttribs[i];
			atts.push({nodeName: att.k, nodeValue: att.v});
		}
	}
	return atts;
};
// Adapt the token format to internal HTML tree builder format, call the actual
// html tree builder by emitting the token.
FauxHTML5.TreeBuilder.prototype.processToken = function (token) {
	// Attach MediaWiki-specific round-trip / non-semantic information as a
	// serialized data-mw attribute.
	// NOTE(review): this mutates the caller's token; processing the same
	// token twice would push a duplicate data-mw attribute — confirm callers
	// never reuse tokens.
	if ( token.dataAttribs ) {
		if ( ! token.attribs ) {
			token.attribs = [];
		}
		token.attribs.push(
			{
				// Mediawiki-specific round-trip / non-semantic information
				k: 'data-mw',
				v: JSON.stringify( token.dataAttribs )
			} );
	}

	// Dispatch on the token's constructor to translate it into the
	// {type, name, data} event shape the html5 tree builder consumes.
	switch( token.constructor ) {
		case String:
			this.emit('token', {type: 'Characters', data: token});
			break;
		case NlTk:
			// Newline tokens carry nothing the tree builder needs.
			break;
		case TagTk:
			this.emit('token', {type: 'StartTag',
				name: token.name,
				data: this._att(token.attribs)});
			break;
		case SelfclosingTagTk:
			this.emit('token', {type: 'StartTag',
				name: token.name,
				data: this._att(token.attribs)});
			if ( HTML5.VOID_ELEMENTS.indexOf( token.name.toLowerCase() ) < 0 ) {
				// VOID_ELEMENTS are automagically treated as self-closing by
				// the tree builder
				this.emit('token', {type: 'EndTag',
					name: token.name,
					data: this._att(token.attribs)});
			}
			break;
		case EndTagTk:
			this.emit('token', {type: 'EndTag',
				name: token.name,
				data: this._att(token.attribs)});
			break;
		case CommentTk:
			this.emit('token', {type: 'Comment',
				data: token.value});
			break;
		case EOFTk:
			this.emit('end');
			this.emit('token', { type: 'EOF' } );
			this.document = this.parser.document;
			if ( ! this.document.body ) {
				// HACK: This should not be needed really.
				this.document.body = this.parser.document.getElementsByTagName('body')[0];
			}
			// Emit the document to consumers
			//this.emit('document', this.document);
			break;
		default:
			console.warn("Unhandled token: " + JSON.stringify(token));
			// (A second, unreachable `break;` after this one was removed.)
			break;
	}
};
// Export for CommonJS consumers; in other environments FauxHTML5 remains a
// module-scope binding only. Use strict equality per house style.
if (typeof module === "object") {
	module.exports.FauxHTML5 = FauxHTML5;
}