Lwelling has uploaded a new change for review.

  https://gerrit.wikimedia.org/r/69156


Change subject: Work in progress porting from SQLite to MySQL. Basic fetch 
queries should work but that path will have a lot of debug output that will 
want deleting. Stats and other queries not ported at all Connection variables 
currently hardcoded and should be a config
......................................................................

Work in progress porting from SQLite to MySQL.
Basic fetch queries should work but that path will have a lot of debug
output that will want deleting.
Stats and other queries are not ported at all.
Connection variables are currently hardcoded and should be a config.
This branch started out as an attempt to do transactionless SQLite version
with atomic operations so there may be cruft from that to clean up.

Change-Id: If4b2c2518f296a89a409a3a5328de5fc88aeac3b
---
M js/tests/server/server.js
A js/tests/server/sql/create_everything.mysql
R js/tests/server/sql/create_everything.sqlite
3 files changed, 251 insertions(+), 109 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/mediawiki/extensions/Parsoid 
refs/changes/56/69156/1

diff --git a/js/tests/server/server.js b/js/tests/server/server.js
index 9a0d2d5..7057ad9 100755
--- a/js/tests/server/server.js
+++ b/js/tests/server/server.js
@@ -2,12 +2,21 @@
 ( function () {
 "use strict";
 
+var mysql = require( 'mysql' );
+var db = mysql.createConnection({
+       host     : 'localhost',
+       database : 'parsoid',
+       user     : 'parsoid',
+       password : 'parsoidpw',
+       multipleStatements : true,
+});
+
+db.connect();
+
 var http = require( 'http' ),
        express = require( 'express' ),
-       sqlite = require( 'sqlite3' ),
        dbStack = [], dbFlag = false,
        argv = require( 'optimist' ).argv,
-       db = new sqlite.Database( argv._[0] || '/mnt/rtserver/pages.db' ),
        // The maximum number of tries per article
        maxTries = 6,
        // The maximum number of fetch retries per article
@@ -17,8 +26,8 @@
 
 var counter = 0;
 
-// ----------------- Prepared queries --------------
-var dbGetTitle = db.prepare(
+// ----------------- The queries --------------
+var dbGetTitle = 
        'SELECT pages.id, pages.title, pages.prefix ' +
        'FROM pages ' +
        'LEFT JOIN claims ON pages.id = claims.page_id AND claims.commit_hash = 
? ' +
@@ -27,9 +36,15 @@
        '( claims.id IS NULL OR ' +
        '( claims.has_errorless_result = 0 AND claims.num_tries <= ? AND 
claims.timestamp < ? ) ) ' +
        'ORDER BY stats.score DESC, ' +
-       'claims.timestamp ASC LIMIT 1 OFFSET ? ' );
+       'claims.timestamp ASC LIMIT 1 OFFSET ? ';
 
-var dbGetTitleRandom = db.prepare(
+var dbFindTitleForClaim = 
+       'SELECT pages.id, pages.title, pages.prefix ' +
+       'FROM pages, claims ' +
+       'WHERE pages.id = ? AND pages.id = claims.page_id AND ' +
+       'claims.commit_hash= ?';
+
+var dbGetTitleRandom = 
        'SELECT pages.id, pages.title, pages.prefix ' +
        'FROM pages ' +
        'LEFT JOIN claims ON pages.id = claims.page_id AND claims.commit_hash = 
? ' +
@@ -38,75 +53,102 @@
        '( claims.id IS NULL OR ' +
        '( claims.has_errorless_result = 0 AND claims.num_tries <= ? AND 
claims.timestamp < ? ) ) ' +
        'ORDER BY stats.score DESC, ' +
-       'claims.timestamp ASC, RANDOM() LIMIT 1' );
+       'claims.timestamp ASC, RANDOM() LIMIT 1';
 
-var dbIncrementFetchErrorCount = db.prepare(
-       'UPDATE pages SET num_fetch_errors = num_fetch_errors + 1 WHERE title = 
? AND prefix = ?');
+var dbIncrementFetchErrorCount = 
+       'UPDATE pages SET num_fetch_errors = num_fetch_errors + 1 WHERE title = 
? AND prefix = ?';
 
-var dbClearFetchErrorCount = db.prepare(
-       'UPDATE pages SET num_fetch_errors = 0 WHERE title = ? and prefix = ?');
+var dbClearFetchErrorCount = 
+       'UPDATE pages SET num_fetch_errors = 0 WHERE title = ? and prefix = ?';
 
-var dbInsertCommit = db.prepare(
+var dbInsertCommit = 
        'INSERT OR IGNORE INTO commits ( hash, timestamp ) ' +
-       'VALUES ( ?, ? )' );
+       'VALUES ( ?, ? )';
 
-var dbFindClaimByPageId = db.prepare(
+var dbFindClaimByPageId = 
        'SELECT claims.id, claims.num_tries FROM claims ' +
-       'WHERE claims.page_id = ? AND claims.commit_hash = ?');
+       'WHERE claims.page_id = ? AND claims.commit_hash = ?';
 
-var dbFindClaimByTitle = db.prepare(
+var dbFindClaimByTitle = 
        'SELECT claims.id, claims.num_tries, claims.page_id FROM claims ' +
        'JOIN pages ON pages.id = claims.page_id AND pages.title = ? AND 
pages.prefix = ? ' +
-       'WHERE claims.commit_hash = ? AND claims.has_errorless_result = 0');
+       'WHERE claims.commit_hash = ? AND claims.has_errorless_result = 0';
 
-var dbInsertClaim = db.prepare(
+var dbInsertClaim = 
        'INSERT INTO claims ( page_id, commit_hash, timestamp ) ' +
-       'VALUES ( ?, ?, ? )');
+       'VALUES ( ?, ?, ? )';
 
-var dbUpdateClaim = db.prepare(
-       'UPDATE claims SET timestamp = ?, num_tries = num_tries + 1 WHERE id = 
?');
+var dbTryInsertClaim = 
+       'INSERT INTO claims ( page_id, commit_hash, timestamp ) ' +
+       'VALUES ( ( ' +
+       'SELECT pages.id ' +
+       'FROM pages ' +
+       'LEFT JOIN claims ON pages.id = claims.page_id AND claims.commit_hash = 
? ' +
+       'LEFT JOIN stats ON stats.id = pages.latest_result ' +
+       'WHERE num_fetch_errors < ? AND ' +
+       '( claims.id IS NULL OR ' +
+       '( claims.has_errorless_result = 0 AND claims.num_tries <= ? AND 
claims.timestamp < ? ) ) ' +
+       'ORDER BY stats.score DESC, ' +
+       'claims.timestamp ASC LIMIT 1 ' +
+       ' ), ?, ? )';
 
-var dbUpdateClaimResult = db.prepare(
-       'UPDATE claims SET has_errorless_result = 1 WHERE id = ?');
+var dbTryUpdateClaim = 
+       // the ids look redundant, but this is intended to atomically succeed, 
or fail if already recently reclaimed
+       'UPDATE claims SET timestamp = ?, num_tries = num_tries + 1 WHERE id = 
? AND id = (' +
+       'SELECT pages.id ' +
+       'FROM pages ' +
+       'LEFT JOIN claims ON pages.id = claims.page_id AND claims.commit_hash = 
? ' +
+       'LEFT JOIN stats ON stats.id = pages.latest_result ' +
+       'WHERE num_fetch_errors < ? AND ' +
+       '( claims.id IS NULL OR ' +
+       '( claims.has_errorless_result = 0 AND claims.num_tries <= ? AND 
claims.timestamp < ? ) ) ' +
+       'ORDER BY stats.score DESC, ' +
+       'claims.timestamp ASC LIMIT 1)';
 
-var dbFindStatRow = db.prepare(
-       'SELECT id FROM stats WHERE page_id = ? AND commit_hash = ?');
+var dbUpdateClaim = 
+       'UPDATE claims SET timestamp = ?, num_tries = num_tries + 1 WHERE id = 
?';
 
-var dbInsertResult = db.prepare(
+var dbUpdateClaimResult = 
+       'UPDATE claims SET has_errorless_result = 1 WHERE id = ?';
+
+var dbFindStatRow = 
+       'SELECT id FROM stats WHERE page_id = ? AND commit_hash = ?';
+
+var dbInsertResult = 
        'INSERT INTO results ( claim_id, result ) ' +
-       'VALUES ( ?, ? )');
+       'VALUES ( ?, ? )';
 
-var dbUpdateResult = db.prepare(
-       'UPDATE results SET result = ? WHERE claim_id = ?');
+var dbUpdateResult = 
+       'UPDATE results SET result = ? WHERE claim_id = ?';
 
-var dbInsertClaimStats = db.prepare(
+var dbInsertClaimStats = 
        'INSERT INTO stats ' +
        '( skips, fails, errors, score, page_id, commit_hash ) ' +
-       'VALUES ( ?, ?, ?, ?, ?, ? ) ' );
+       'VALUES ( ?, ?, ?, ?, ?, ? ) ';
 
-var dbUpdateClaimStats = db.prepare(
+var dbUpdateClaimStats = 
        'UPDATE stats ' +
        'SET skips = ?, fails = ?, errors = ?, score = ? ' +
-       'WHERE page_id = ? AND commit_hash = ?' );
+       'WHERE page_id = ? AND commit_hash = ?';
 
-var dbUpdateLatestResult = db.prepare(
+var dbUpdateLatestResult = 
        'UPDATE pages ' +
        'SET latest_result = ( SELECT id from stats ' +
     'WHERE stats.commit_hash = ? AND page_id = pages.id ) ' +
-    'WHERE id = ?' );
+    'WHERE id = ?';
 
-var dbLatestCommitHash = db.prepare(
-       'SELECT hash FROM commits ORDER BY timestamp DESC LIMIT 1');
+var dbLatestCommitHash = 
+       'SELECT hash FROM commits ORDER BY timestamp DESC LIMIT 1';
 
-var dbSecondLastCommitHash = db.prepare(
-       'SELECT hash FROM commits ORDER BY timestamp DESC LIMIT 1 OFFSET 1');
+var dbSecondLastCommitHash = 
+       'SELECT hash FROM commits ORDER BY timestamp DESC LIMIT 1 OFFSET 1';
 
 // IMPORTANT: node-sqlite3 library has a bug where it seems to cache
 // invalid results when a prepared statement has no variables.
 // Without this dummy variable as a workaround for the caching bug,
 // stats query always fails after the first invocation.  So, if you
 // do upgrade the library, please test before removing this workaround.
-var dbStatsQuery = db.prepare(
+var dbStatsQuery = 
        'SELECT ? AS cache_bug_workaround, ' +
        '(select hash from commits order by timestamp desc limit 1) as maxhash, 
' +
        '(select count(*) from stats where stats.commit_hash = ' +
@@ -144,9 +186,9 @@
        'AND s2.commit_hash = (SELECT hash FROM commits ORDER BY timestamp DESC 
LIMIT 1 OFFSET 1 ) ' +
        'AND s1.score < s2.score ) as numfixes '  +
 
-       'FROM pages JOIN stats on pages.latest_result = stats.id');
+       'FROM pages JOIN stats on pages.latest_result = stats.id';
 
-var dbPerWikiStatsQuery = db.prepare(
+var dbPerWikiStatsQuery = 
        'SELECT ? AS cache_bug_workaround, ' +
        '(select hash from commits order by timestamp desc limit 1) as maxhash, 
' +
        '(select count(*) from stats join pages on stats.page_id = pages.id ' +
@@ -188,9 +230,9 @@
        'AND pages.prefix = ? ' +
        'AND s1.score < s2.score ) as numfixes ' +
 
-       'FROM pages JOIN stats on pages.latest_result = stats.id WHERE 
pages.prefix = ?');
+       'FROM pages JOIN stats on pages.latest_result = stats.id WHERE 
pages.prefix = ?';
 
-var dbFailsQuery = db.prepare(
+var dbFailsQuery = 
        'SELECT pages.title, pages.prefix, commits.hash, stats.errors, 
stats.fails, stats.skips ' +
        'FROM stats ' +
        'JOIN (' +
@@ -199,27 +241,27 @@
        'JOIN pages ON stats.page_id = pages.id ' +
        'JOIN commits ON stats.commit_hash = commits.hash ' +
        'ORDER BY stats.score DESC ' +
-       'LIMIT 40 OFFSET ?' );
+       'LIMIT 40 OFFSET ?' ;
 
-var dbGetOneResult = db.prepare(
+var dbGetOneResult = 
        'SELECT result FROM results ' +
        'JOIN claims ON results.claim_id = claims.id ' +
        'JOIN commits ON claims.commit_hash = commits.hash ' +
        'JOIN pages ON pages.id = claims.page_id ' +
        'WHERE pages.title = ? AND pages.prefix = ? ' +
-       'ORDER BY commits.timestamp DESC LIMIT 1' );
+       'ORDER BY commits.timestamp DESC LIMIT 1' ;
 
-var dbGetResultWithCommit = db.prepare(
+var dbGetResultWithCommit = 
     'SELECT result FROM results ' +
     'JOIN claims ON results.claim_id = claims.id ' +
     'AND claims.commit_hash = ? ' +
     'JOIN pages ON pages.id = claims.page_id ' +
-    'WHERE pages.title = ? AND pages.prefix = ?' );
+    'WHERE pages.title = ? AND pages.prefix = ?'; 
 
-var dbFailedFetches = db.prepare(
-       'SELECT title, prefix FROM pages WHERE num_fetch_errors >= ?');
+var dbFailedFetches = 
+       'SELECT title, prefix FROM pages WHERE num_fetch_errors >= ?';
 
-var dbRegressedPages = db.prepare(
+var dbRegressedPages = 
        'SELECT pages.title, pages.prefix, ' +
        's1.commit_hash AS new_commit, s1.errors AS new_errors, s1.fails AS 
new_fails, s1.skips AS new_skips, ' +
        's2.commit_hash AS old_commit, s2.errors AS old_errors, s2.fails AS 
old_fails, s2.skips AS old_skips ' +
@@ -229,9 +271,9 @@
        'WHERE s2.id != s1.id AND s1.score > s2.score ' +
        'GROUP BY pages.id ' + // picks a "random" past hash from which we 
regressed
        'ORDER BY s1.score - s2.score DESC ' +
-       'LIMIT 40 OFFSET ?');
+       'LIMIT 40 OFFSET ?';
 
-var dbFixedPages = db.prepare(
+var dbFixedPages = 
        'SELECT pages.title, pages.prefix, ' +
        's1.commit_hash AS new_commit, s1.errors AS new_errors, s1.fails AS 
new_fails, s1.skips AS new_skips, ' +
        's2.commit_hash AS old_commit, s2.errors AS old_errors, s2.fails AS 
old_fails, s2.skips AS old_skips ' +
@@ -241,21 +283,21 @@
        'WHERE s2.id != s1.id AND s1.score < s2.score ' +
        'GROUP BY pages.id ' + // picks a "random" past hash from which we 
regressed
        'ORDER BY s1.score - s2.score ASC ' +
-       'LIMIT 40 OFFSET ?');
+       'LIMIT 40 OFFSET ?';
 
-var dbFailsDistribution = db.prepare(
+var dbFailsDistribution = 
        'SELECT ? AS caching_bug_workaround, fails, count(*) AS num_pages ' +
        'FROM stats ' +
        'JOIN pages ON pages.latest_result = stats.id ' +
-       'GROUP by fails');
+       'GROUP by fails';
 
-var dbSkipsDistribution = db.prepare(
+var dbSkipsDistribution = 
        'SELECT ? AS caching_bug_workaround, skips, count(*) AS num_pages ' +
        'FROM stats ' +
        'JOIN pages ON pages.latest_result = stats.id ' +
-       'GROUP by skips');
+       'GROUP by skips';
 
-var dbCommits = db.prepare(
+var dbCommits = 
        'SELECT ? AS caching_bug_workaround, hash, timestamp, ' +
        //// get the number of fixes column
        //      '(SELECT count(*) ' +
@@ -274,9 +316,9 @@
        //// get the number of tests for this commit column
                '(select count(*) from stats where stats.commit_hash = c1.hash) 
as numtests ' +
        'FROM commits c1 ' +
-       'ORDER BY timestamp DESC');
+       'ORDER BY timestamp DESC';
 
-var dbFixesBetweenRevs = db.prepare(
+var dbFixesBetweenRevs = 
        'SELECT pages.title, pages.prefix, ' +
        's1.commit_hash AS new_commit, s1.errors AS new_errors, s1.fails AS 
new_fails, s1.skips AS new_skips, ' +
        's2.commit_hash AS old_commit, s2.errors AS old_errors, s2.fails AS 
old_fails, s2.skips AS old_skips ' +
@@ -285,16 +327,16 @@
        'JOIN stats AS s2 ON s2.page_id = pages.id ' +
        'WHERE s1.commit_hash = ? AND s2.commit_hash = ? AND s1.score < 
s2.score ' +
        'ORDER BY s1.score - s2.score ASC ' +
-       'LIMIT 40 OFFSET ?');
+       'LIMIT 40 OFFSET ?';
 
-var dbNumFixesBetweenRevs = db.prepare(
+var dbNumFixesBetweenRevs = 
        'SELECT count(*) as numFixes ' +
        'FROM pages ' +
        'JOIN stats AS s1 ON s1.page_id = pages.id ' +
        'JOIN stats AS s2 ON s2.page_id = pages.id ' +
-       'WHERE s1.commit_hash = ? AND s2.commit_hash = ? AND s1.score < 
s2.score ');
+       'WHERE s1.commit_hash = ? AND s2.commit_hash = ? AND s1.score < 
s2.score ';
 
-var dbRegressionsBetweenRevs = db.prepare(
+var dbRegressionsBetweenRevs = 
        'SELECT pages.title, pages.prefix, ' +
        's1.commit_hash AS new_commit, s1.errors AS new_errors, s1.fails AS 
new_fails, s1.skips AS new_skips, ' +
        's2.commit_hash AS old_commit, s2.errors AS old_errors, s2.fails AS 
old_fails, s2.skips AS old_skips ' +
@@ -303,16 +345,16 @@
        'JOIN stats AS s2 ON s2.page_id = pages.id ' +
        'WHERE s1.commit_hash = ? AND s2.commit_hash = ? AND s1.score > 
s2.score ' +
        'ORDER BY s1.score - s2.score DESC ' +
-       'LIMIT 40 OFFSET ?');
+       'LIMIT 40 OFFSET ?';
 
-var dbNumRegressionsBetweenRevs = db.prepare(
+var dbNumRegressionsBetweenRevs = 
        'SELECT count(*) as numRegressions ' +
        'FROM pages ' +
        'JOIN stats AS s1 ON s1.page_id = pages.id ' +
        'JOIN stats AS s2 ON s2.page_id = pages.id ' +
-       'WHERE s1.commit_hash = ? AND s2.commit_hash = ? AND s1.score > 
s2.score ');
+       'WHERE s1.commit_hash = ? AND s2.commit_hash = ? AND s1.score > 
s2.score ';
 
-var dbNumRegressionsBetweenLastTwoRevs = db.prepare(
+var dbNumRegressionsBetweenLastTwoRevs = 
        'SELECT count(*) as numRegressions ' +
        'FROM pages ' +
        'JOIN stats AS s1 ON s1.page_id = pages.id ' +
@@ -321,18 +363,18 @@
                                'FROM commits ORDER BY timestamp DESC LIMIT 1 ) 
' +
         'AND s2.commit_hash = (SELECT hash ' +
                               'FROM commits ORDER BY timestamp DESC LIMIT 1 
OFFSET 1) ' +
-        'AND s1.score > s2.score ');
+        'AND s1.score > s2.score ';
 
-var dbResultsQuery = db.prepare(
+var dbResultsQuery = 
        'SELECT result FROM results'
-);
+;
 
-var dbResultsPerWikiQuery = db.prepare(
+var dbResultsPerWikiQuery = 
        'SELECT result FROM results ' +
        'JOIN claims ON claims.id = results.claim_id ' +
        'JOIN pages ON pages.id = claims.page_id ' +
        'WHERE pages.prefix = ?'
-);
+;
 
 var dbUpdateErrCB = function(title, prefix, hash, type, msg, err) {
        if (err) {
@@ -351,40 +393,40 @@
                db.serialize( function () {
                        // SSS FIXME: what about error checks?
                        dbInsertCommit.run( [ commitHash, decodeURIComponent( 
req.query.ctime ) ] );
-                       dbFindClaimByPageId.get( [ row.id, commitHash ], 
function ( err, claim ) {
+                       dbFindClaimByPageId.get( [ row[0].id, commitHash ], 
function ( err, claim ) {
                                if (claim) {
                                        // Ignoring possible duplicate 
processing
                                        // Increment the # of tries, update 
timestamp
                                        dbUpdateClaim.run([Date.now(), 
claim.id],
-                                               dbUpdateErrCB.bind(null, 
row.title, row.prefix, commitHash, "claim", null));
+                                               dbUpdateErrCB.bind(null, 
row[0].title, row[0].prefix, commitHash, "claim", null));
 
                                        if (claim.num_tries >= maxTries) {
                                                // Too many failures.  Insert 
an error stats entry and retry fetch
-                                               console.log( ' CRASHER?', 
row.prefix + ':' + row.title );
+                                               console.log( ' CRASHER?', 
row[0].prefix + ':' + row[0].title );
                                                var stats = [0, 0, 1, 
statsScore(0,0,1), claim.page_id, commitHash];
                                                dbInsertClaimStats.run( stats, 
function ( err ) {
                                                        if (err) {
                                                                // Try updating 
the stats instead of inserting if we got an error
                                                                // Likely a sql 
constraint error
                                                                
dbUpdateClaimStats.run(stats, function (err) {
-                                                                       
dbUpdateErrCB( row.title, row.prefix, commitHash, 'stats', null, err );
+                                                                       
dbUpdateErrCB( row[0].title, row[0].prefix, commitHash, 'stats', null, err );
                                                                });
                                                        }
                                                } );
                                                fetchPage(commitHash, 
cutOffTimestamp, req, res);
                                        } else {
-                                               console.log( ' ->', row.prefix 
+ ':' + row.title );
-                                               res.send( { prefix: row.prefix, 
title: row.title } );
+                                               console.log( ' ->', 
row[0].prefix + ':' + row[0].title );
+                                               res.send( { prefix: 
row[0].prefix, title: row[0].title } );
                                        }
                                } else {
                                        // Claim doesn't exist
-                                       dbInsertClaim.run( [ row.id, 
commitHash, Date.now() ], function(err) {
+                                       dbInsertClaim.run( [ row[0].id, 
commitHash, Date.now() ], function(err) {
                                                if (!err) {
-                                                       console.log( ' ->', 
row.prefix + ':' + row.title );
-                                                       res.send( { prefix: 
row.prefix, title: row.title } );
+                                                       console.log( ' ->', 
row[0].prefix + ':' + row[0].title );
+                                                       res.send( { prefix: 
row[0].prefix, title: row[0].title } );
                                                } else {
                                                        console.error(err);
-                                                       console.error("Multiple 
clients trying to access the same title:", row.prefix + ':' + row.title );
+                                                       console.error("Multiple 
clients trying to access the same title:", row[0].prefix + ':' + row[0].title );
                                                        // In the rare scenario 
that some other client snatched the
                                                        // title before us, get 
a new title (use the randomized ordering query)
                                                        dbGetTitleRandom.get( [ 
commitHash, maxFetchRetries, maxTries, cutOffTimestamp ],
@@ -410,6 +452,55 @@
                titleCallback.bind( null, req, res, true, commitHash, 
cutOffTimestamp ) );
 };
 
+
+var claimPage = function( commitHash, cutOffTimestamp, req, res ) {
+       // rather than getting a title then trying to claim it, atomically 
claim then fetch the matching title
+/*
+       db.query(
+               'start transaction;' +
+               "insert into pages(title) values ('Foo30');" + 
+               "insert into pages(title) values ('Foo31');" +
+               "insert into pages(title) values ('Foo26');" + 
+               'COMMIT; '
+               , [], function(err) { db.query('rollback', [], 
function(err){}); if( err ) console.error( err ); } );
+*/
+       db.query( 'START TRANSACTION;', [], function( err ){ if ( err ) 
console.error( err ); } );
+       db.query( dbGetTitle, [ commitHash, maxFetchRetries, maxTries, 
cutOffTimestamp, 0 ], function( err, row ) {
+               if ( err ) {
+                       console.error( err );
+                       console.error( "Failed fetching row to update (" + [ 
cutOffTimestamp, commitHash, maxFetchRetries, maxTries, Date.now() ] + ')' );
+               } else {
+                       console.log(row[0].title);
+                       var targetID = row[0].id;
+                       console.log( 'Trying insert of ' + targetID + ' with ' 
+ [ Date.now(), targetID ]);
+                       db.query( dbInsertClaim, [ targetID, commitHash, 
Date.now() ], function(err) {
+                               if ( err ) {
+                                       db.query( 'ROLLBACK;', [], 
function(err){});
+                                       console.error( err );
+                                       console.error( "Failed updating with (" 
+ [ Date.now(), targetID ] + ')' );
+                               } else {
+                                       db.query( 'COMMIT;', [], 
function(err){});
+                                       console.log( 'Update succeeded.  Get 
detail ' + [targetID, commitHash ] );
+
+                                       // success get the updated id and fetch 
the detail to match
+                                       db.query( dbFindTitleForClaim, [ 
targetID, commitHash ], function( err, row ) {
+                                               console.log("XXXX" +  err + row 
);
+                                               if ( !err ) {
+                                                       console.log( row );
+                                                       console.log( ' ->', 
row[0].prefix + ':' + row[0].title + ' (updated)' );
+                                                       res.send( { prefix: 
row[0].prefix, title: row[0].title } );
+                                               } else {
+                                                       console.error( err );
+                                                       console.error( "Failed 
getting the title to match the update " + this.lastID );
+                                               }
+                                       });
+                               }
+                       });
+               }
+       });
+};
+
+
 var getTitle = function ( req, res ) {
        res.setHeader( 'Content-Type', 'text/plain; charset=UTF-8' );
 
@@ -419,7 +510,10 @@
        // or with parsing the page.
        //
        // Hopefully, no page takes longer than 10 minutes to parse. :)
-       fetchPage(req.query.commit, Date.now() - 600, req, res);
+
+       claimPage(req.query.commit, Date.now() - 600, req, res);
+
+//     fetchPage(req.query.commit, Date.now() - 600, req, res);
 };
 
 var statsScore = function(skipCount, failCount, errorCount) {
@@ -453,7 +547,7 @@
                // For now, always sending HTTP 200 back to client.
                res.send( '', 200 );
        } else {
-               dbFindClaimByTitle.get( [ title, prefix, commitHash ], function 
( err, claim ) {
+               db.query( dbFindClaimByTitle, [ title, prefix, commitHash ], 
function ( err, claim ) {
                        if (!err && claim) {
                                db.serialize( function () {
                                        dbClearFetchErrorCount.run([title, 
prefix],
@@ -551,15 +645,15 @@
                        res.status( 200 );
                        res.write( '<html><body>' );
 
-                       var tests = row.total,
-                       errorLess = row.no_errors,
-                       skipLess = row.no_skips,
-                       numRegressions = row.numregressions,
-                       numFixes = row.numfixes,
+                       var tests = row[0].total,
+                       errorLess = row[0].no_errors,
+                       skipLess = row[0].no_skips,
+                       numRegressions = row[0].numregressions,
+                       numFixes = row[0].numfixes,
                        noErrors = Math.round( 100 * 100 * errorLess / ( tests 
|| 1 ) ) / 100,
                        perfects = Math.round( 100* 100 * skipLess / ( tests || 
1 ) ) / 100,
                        syntacticDiffs = Math.round( 100 * 100 *
-                               ( row.no_fails / ( tests || 1 ) ) ) / 100;
+                               ( row[0].no_fails / ( tests || 1 ) ) ) / 100;
 
                        res.write( '<p>We have run roundtrip-tests on <b>' +
                                tests +
@@ -588,18 +682,18 @@
 
                        res.write( '<p>Latest revision:' );
                        res.write( '<table><tbody>');
-                       displayRow(res, "Git SHA1", row.maxhash);
-                       displayRow(res, "Test Results", row.maxresults);
+                       displayRow(res, "Git SHA1", row[0].maxhash);
+                       displayRow(res, "Test Results", row[0].maxresults);
                        displayRow(res, "Regressions", numRegressions);
                        displayRow(res, "Fixes", numFixes);
                        res.write( '</tbody></table></p>' );
 
                        res.write( '<p>Averages (over the latest results):' );
                        res.write( '<table><tbody>');
-                       displayRow(res, "Errors", row.avgerrors);
-                       displayRow(res, "Fails", row.avgfails);
-                       displayRow(res, "Skips", row.avgskips);
-                       displayRow(res, "Score", row.avgscore);
+                       displayRow(res, "Errors", row[0].avgerrors);
+                       displayRow(res, "Fails", row[0].avgfails);
+                       displayRow(res, "Skips", row[0].avgskips);
+                       displayRow(res, "Score", row[0].avgscore);
                        res.write( '</tbody></table></p>' );
                        res.write( indexLinkList() );
 
@@ -656,15 +750,15 @@
                                                res.write( 'red' );
                                        }
 
-                                       res.write( '"><a target="_blank" 
href="http://parsoid.wmflabs.org/_rt/' + row.prefix + '/' +
-                                               row.title + '">' +
-                                               row.prefix + ':' + row.title + 
'</a> | ' +
-                                               '<a target="_blank" 
href="http://localhost:8000/_rt/' + row.prefix + '/' + row.title +
+                                       res.write( '"><a target="_blank" 
href="http://parsoid.wmflabs.org/_rt/' + row[0].prefix + '/' +
+                                               row[0].title + '">' +
+                                               row[0].prefix + ':' + 
row[0].title + '</a> | ' +
+                                               '<a target="_blank" 
href="http://localhost:8000/_rt/' + row[0].prefix + '/' + row[0].title +
                                                '">@lh</a> | ' +
-                                               '<a target="_blank" 
href="/latestresult/' + row.prefix + '/' + row.title + '">latest result</a>' +
+                                               '<a target="_blank" 
href="/latestresult/' + row[0].prefix + '/' + row[0].title + '">latest 
result</a>' +
                                                '</td>' );
-                                       res.write( '<td>' + makeCommitLink( 
row.hash, row.title, row.prefix ) + '</td>' );
-                                       res.write( '<td>' + row.skips + 
'</td><td>' + row.fails + '</td><td>' + ( row.errors === null ? 0 : row.errors 
) + '</td></tr>' );
+                                       res.write( '<td>' + makeCommitLink( 
row[0].hash, row[0].title, row[0].prefix ) + '</td>' );
+                                       res.write( '<td>' + row[0].skips + 
'</td><td>' + row[0].fails + '</td><td>' + ( row[0].errors === null ? 0 : 
row[0].errors ) + '</td></tr>' );
                                }
                                res.end( '</table></body></html>' );
                        }
@@ -718,7 +812,7 @@
                res.setHeader( 'Content-Type', 'text/xml; charset=UTF-8' );
                res.status( 200 );
                res.write( '<?xml-stylesheet href="/static/result.css"?>\n' );
-               res.end( row.result );
+               res.end( row[0].result );
        } else {
                res.send( 'no results for that page at the requested revision', 
404 );
        }
@@ -877,7 +971,7 @@
                        } else {
                                var topfixesLink = "/topfixes/between/" + r1 + 
"/" + r2,
                                        header = "Total regressions between 
selected revisions: " +
-                                                       row.numRegressions +
+                                                       row[0].numRegressions +
                                                        ' | <a href="' + 
topfixesLink + '">topfixes</a>';
                                dbRegressionsBetweenRevs.all([r2, r1, offset ],
                                        displayPageList.bind(null, res, 
urlPrefix, page, header));
@@ -904,7 +998,7 @@
                                res.send( err.toString(), 500 );
                        } else {
                                var regressionLink = "/regressions/between/" + 
r1 + "/" + r2,
-                                       header = "Total fixes between selected 
revisions: " + row.numFixes +
+                                       header = "Total fixes between selected 
revisions: " + row[0].numFixes +
                                                ' | <a href="' + regressionLink 
+ '">regressions</a>';
                                dbFixesBetweenRevs.all([r2, r1, offset ],
                                        displayPageList.bind(null, res, 
urlPrefix, page, header));
diff --git a/js/tests/server/sql/create_everything.mysql 
b/js/tests/server/sql/create_everything.mysql
new file mode 100644
index 0000000..4b766d6
--- /dev/null
+++ b/js/tests/server/sql/create_everything.mysql
@@ -0,0 +1,48 @@
+-- Run this sql file on your database file before you do anything else
+
+CREATE TABLE commits (
+       hash CHAR( 40 ) NOT NULL UNIQUE PRIMARY KEY,
+       `timestamp` TIMESTAMP  NOT NULL
+);
+
+CREATE TABLE pages (
+       id INTEGER NOT NULL UNIQUE PRIMARY KEY AUTO_INCREMENT,
+       num_fetch_errors INTEGER NOT NULL DEFAULT 0,
+       title TEXT NOT NULL,
+       latest_result INTEGER DEFAULT NULL,
+       prefix CHAR( 2 ) NOT NULL DEFAULT 'en'
+);
+CREATE INDEX title_idx ON pages ( title ( 50 ) );
+CREATE INDEX latest_result_idx ON pages ( latest_result );
+CREATE UNIQUE INDEX title_prefix_idx ON pages ( title ( 50 ), prefix );
+
+CREATE TABLE results (
+       id INTEGER NOT NULL UNIQUE PRIMARY KEY AUTO_INCREMENT,
+       claim_id INTEGER NOT NULL,
+       result TEXT NOT NULL
+);
+CREATE INDEX claim_id_idx ON results ( claim_id );
+
+CREATE TABLE claims (
+       id INTEGER NOT NULL UNIQUE PRIMARY KEY AUTO_INCREMENT,
+       page_id INTEGER NOT NULL,
+       commit_hash CHAR( 40 ) NOT NULL,
+       num_tries INTEGER NOT NULL DEFAULT 1,
+        -- FIXME: rename to has_result or use a pointer to the result instead.
+        -- We currently just use it to track whether it has a result, error or
+        -- not.
+       has_errorless_result INTEGER NOT NULL DEFAULT 0,
+       `timestamp` TIMESTAMP NOT NULL
+);
+CREATE UNIQUE INDEX page_id_has_idx ON claims ( page_id, commit_hash );
+
+CREATE TABLE stats (
+       id INTEGER NOT NULL UNIQUE PRIMARY KEY AUTO_INCREMENT,
+       page_id INTEGER NOT NULL,
+       commit_hash CHAR( 40 ) NOT NULL,
+       skips INTEGER NOT NULL DEFAULT 0,
+       fails INTEGER NOT NULL DEFAULT 0,
+       errors INTEGER NOT NULL DEFAULT 0,
+       score INTEGER NOT NULL DEFAULT 0
+);
+CREATE INDEX page_idx ON stats ( page_id );
diff --git a/js/tests/server/sql/create_everything.sql 
b/js/tests/server/sql/create_everything.sqlite
similarity index 100%
rename from js/tests/server/sql/create_everything.sql
rename to js/tests/server/sql/create_everything.sqlite

-- 
To view, visit https://gerrit.wikimedia.org/r/69156
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: If4b2c2518f296a89a409a3a5328de5fc88aeac3b
Gerrit-PatchSet: 1
Gerrit-Project: mediawiki/extensions/Parsoid
Gerrit-Branch: master
Gerrit-Owner: Lwelling <[email protected]>

_______________________________________________
MediaWiki-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to