In my use case, the node process was exiting whenever the db had a connection
issue. The underlying cause, of course, was that the connection was emitting an
uncaught `error` event.
My first approach was to handle that `error` event, so I exposed a way to
intercept it, but the cleaner solution is, as @jorgebay suggested, to update
the API and rename the event to something that doesn't crash the node process
when it goes unhandled.
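For context, this is standard Node.js `EventEmitter` behaviour rather than anything specific to the driver. Here's a minimal sketch (the emitter and event names are just for illustration) of why an unhandled `error` event is fatal while a renamed event like `socketError` is not:
```javascript
const EventEmitter = require('events');
const emitter = new EventEmitter();

// a regular event like 'socketError' is harmless when nobody listens:
// it is simply dropped, and a listener can handle it once attached
emitter.on('socketError', (err) => console.log('handled:', err.message));
emitter.emit('socketError', new Error('db connection lost'));

// 'error' is special-cased by Node: with no listener attached,
// emitting it throws, which takes down the process if unhandled
try {
  emitter.emit('error', new Error('db connection lost'));
} catch (err) {
  console.log('would have crashed the process:', err.message);
}
```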
I did leave the initial commit here, as I find it useful. Here's how I would
handle a reconnect with this new change:
```javascript
const gremlin = require('gremlin');
const DriverRemoteConnection = gremlin.driver.DriverRemoteConnection;
const MAX_RETRY_COUNT = 3;
const RETRY_DELAY = 1000;
let retryCount = 0;
let remoteConnection;
// this is the initial function that defines the driver (with its own event emitters)
// and attaches error event listeners before opening a connection
async function setup() {
  remoteConnection = new DriverRemoteConnection('ws://localhost:8182/gremlin');
  remoteConnection.on('socketError', handleDisconnect);
  await connectAndRun();
}

// the main error handler
// it will trigger when the db is disconnected
async function handleDisconnect() {
  // retry to connect and do the logic
  setTimeout(connectAndRun, RETRY_DELAY);
}

async function connectAndRun() {
  retryCount++;
  console.log(`Connecting... try #${retryCount}`);
  // exit the process if we retried to connect too many times
  if (retryCount > MAX_RETRY_COUNT) {
    throw new Error(`retried ${MAX_RETRY_COUNT} times... let's quit`);
  }
  try {
    // open the connection
    await remoteConnection.open();
    // task block
    // do whatever needs to be done
    // ...
    await remoteConnection.close();
  } catch (err) {
    console.error('something bad has happened 😕', err);
  }
}
```
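To actually run the example, something like the call below would kick it off (just a sketch, not part of the driver API). Note that the retries scheduled via `setTimeout` run outside this promise chain, so the "retried N times" error from a later attempt would surface as an unhandled rejection unless `handleDisconnect` also catches it:
```javascript
// kick off the initial connection attempt; only errors from the first
// connectAndRun() call (awaited inside setup) land in this catch
setup().catch((err) => {
  console.error('giving up:', err.message);
  process.exit(1);
});
```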
Please let me know if you think I should split this PR into 2 separate ones.