// modules/parser/ext.core.NoIncludeOnly.js

/**
* Simple noinclude / onlyinclude implementation. Strips all tokens in
* noinclude sections.
*
* @author Gabriel Wicke <gwicke@wikimedia.org>
*/
var TokenCollector = require( './ext.util.TokenCollector.js' ).TokenCollector;
/**
* OnlyInclude sadly forces synchronous template processing, as it needs to
* hold onto all tokens in case an onlyinclude block is encountered later.
* This can fortunately be worked around by caching the tokens after
* onlyinclude processing (which is a good idea anyway).
*/
/**
 * Token transform implementing <onlyinclude> handling.
 *
 * When processing a transclusion (isInclude), every token is collected so
 * that content outside onlyinclude sections can be stripped. When parsing
 * the page itself, onlyinclude tags are merely converted to meta tokens
 * carrying round-trip information.
 *
 * @constructor
 * @param {Object} manager - token transform manager to register with
 * @param {boolean} isInclude - true when processing a transclusion
 */
function OnlyInclude( manager, isInclude ) {
	this.manager = manager;
	if ( !isInclude ) {
		// Top-level page: just convert onlyinclude tokens into meta tags
		// with round-trip info.
		this.manager.addTransform( this.onOnlyInclude.bind( this ),
				this.rank, 'tag', 'onlyinclude' );
		return;
	}
	this.accum = [];
	this.inOnlyInclude = false;
	this.foundOnlyInclude = false;
	// Register for 'any' token and collect all of them.
	this.manager.addTransform( this.onAnyInclude.bind( this ),
			this.rank, 'any' );
}
// Run before any further processing.
OnlyInclude.prototype.rank = 0.01;

/**
 * Top-level (non-include) handler: replace an onlyinclude tag with a meta
 * token recording the stripped token for round-tripping.
 *
 * @param {Token} token - the onlyinclude tag token
 * @param {Object} manager - transform manager (unused)
 * @returns {Object} transform result wrapping the meta token
 */
OnlyInclude.prototype.onOnlyInclude = function ( token, manager ) {
	var strippedTokens = [ token ];
	var meta = new TagTk( 'meta' );
	meta.dataAttribs = { strippedTokens: strippedTokens };
	return { token: meta };
};
/**
 * Include-mode handler registered for 'any' token.
 *
 * Buffers tokens seen outside an onlyinclude section; tokens inside the
 * section pass through. At EOF, the buffer is emitted only if no
 * onlyinclude section was ever found (plain transclusion), otherwise the
 * buffered/stripped tokens have been wrapped into meta tags already.
 *
 * @param {Token} token - incoming token of any type
 * @param {Object} manager - transform manager (unused)
 * @returns {Object} transform result: { token }, { tokens } or {}
 */
OnlyInclude.prototype.onAnyInclude = function ( token, manager ) {
	//this.manager.env.dp( 'onAnyInclude', token, this );
	var meta;
	if ( token.constructor === EOFTk ) {
		this.inOnlyInclude = false;
		if ( this.accum.length && ! this.foundOnlyInclude ) {
			// No onlyinclude section in the page: emit everything we
			// buffered, followed by the EOF token.
			var res = this.accum;
			res.push( token );
			this.accum = [];
			//this.manager.setTokensRank( res, this.rank + 0.001 );
			return { tokens: res };
		} else {
			// Reset state for the next parse.
			this.foundOnlyInclude = false;
			this.accum = [];
			return { token: token };
		}
	} else if ( ( token.constructor === TagTk ||
				token.constructor === EndTagTk ||
				token.constructor === SelfclosingTagTk ) &&
			token.name === 'onlyinclude' ) {
		if ( ! this.inOnlyInclude ) {
			this.foundOnlyInclude = true;
			this.inOnlyInclude = true;
			// Wrap the collected (stripped) tokens plus the opening tag
			// into a meta tag for round-tripping.
			meta = new TagTk( 'meta' );
			this.accum.push( token );
			meta.dataAttribs = { strippedTokens: this.accum };
			this.accum = [];
		} else {
			this.inOnlyInclude = false;
			meta = new TagTk( 'meta' );
			meta.dataAttribs = { strippedTokens: [token] };
		}
		//meta.rank = this.rank;
		// FIX: both branches now return a proper transform-result object;
		// the open-tag branch previously returned the bare meta token,
		// unlike every other return path in this transform.
		return { token: meta };
	} else {
		if ( this.inOnlyInclude ) {
			// Inside an onlyinclude section: pass the token through.
			//token.rank = this.rank;
			return { token: token };
		} else {
			// Outside a section while transcluding: buffer (strip) it.
			this.accum.push( token );
			return { };
		}
	}
};
/**
 * NoInclude: strip everything between <noinclude> and </noinclude> when
 * processing a transclusion; when parsing the page itself, keep the inner
 * content but drop the noinclude tags.
 *
 * @constructor
 * @param {Object} manager - token transform manager to register with
 * @param {boolean} isInclude - true when processing a transclusion
 */
function NoInclude( manager, isInclude ) {
	var collectionHandler = function ( tokens ) {
		if ( isInclude ) {
			// Transclusion context: drop the whole collection.
			//manager.env.tp( 'noinclude stripping' );
			return {};
		}
		// Top-level context: remove the opening tag, and the closing tag
		// unless the collection ended at end-of-input instead.
		tokens.shift();
		if ( tokens.length &&
				tokens[tokens.length - 1].constructor !== EOFTk ) {
			tokens.pop();
		}
		return { tokens: tokens };
	};
	new TokenCollector(
		manager,
		collectionHandler,
		true, // match the end-of-input if </noinclude> is missing
		0.02, // very early in stage 1, to avoid any further processing
		'tag',
		'noinclude'
	);
}
// XXX: Preserve includeonly content in meta tag (data attribute) for
// round-tripping!
/**
 * IncludeOnly: keep the content between <includeonly> tags when processing
 * a transclusion (dropping the tags themselves); strip the whole section
 * when parsing the page itself.
 *
 * @constructor
 * @param {Object} manager - token transform manager to register with
 * @param {boolean} isInclude - true when processing a transclusion
 */
function IncludeOnly( manager, isInclude ) {
	var collectionHandler = function ( tokens ) {
		if ( !isInclude ) {
			// Top-level page view: strip the entire section.
			manager.env.tp( 'includeonly stripping' );
			return {};
		}
		// Transclusion: remove the opening tag, and the closing tag
		// unless the collection ended at end-of-input instead.
		tokens.shift();
		if ( tokens.length &&
				tokens[tokens.length - 1].constructor !== EOFTk ) {
			tokens.pop();
		}
		return { tokens: tokens };
	};
	new TokenCollector(
		manager,
		collectionHandler,
		true, // match the end-of-input if </includeonly> is missing
		0.03, // very early in stage 1, to avoid any further processing
		'tag',
		'includeonly'
	);
}
// Export for CommonJS environments (e.g. node); no-op in the browser.
// (typeof always yields a string, so strict equality is safe and is the
// idiomatic comparison here.)
if ( typeof module === "object" ) {
	module.exports.NoInclude = NoInclude;
	module.exports.IncludeOnly = IncludeOnly;
	module.exports.OnlyInclude = OnlyInclude;
}