[ 
https://issues.apache.org/jira/browse/ARTEMIS-2823?focusedWorklogId=494035&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-494035
 ]

ASF GitHub Bot logged work on ARTEMIS-2823:
-------------------------------------------

                Author: ASF GitHub Bot
            Created on: 02/Oct/20 16:07
            Start Date: 02/Oct/20 16:07
    Worklog Time Spent: 10m 
      Work Description: ehsavoie commented on a change in pull request #3204:
URL: https://github.com/apache/activemq-artemis/pull/3204#discussion_r498714118



##########
File path: 
artemis-jdbc-store/src/main/java/org/apache/activemq/artemis/jdbc/store/file/PostgresSequentialSequentialFileDriver.java
##########
@@ -36,37 +36,32 @@ public PostgresSequentialSequentialFileDriver() throws 
SQLException {
       super();
    }
 
-   public PostgresSequentialSequentialFileDriver(DataSource dataSource, 
SQLProvider provider) {
+   public PostgresSequentialSequentialFileDriver(JDBCConnectionProvider 
connectionProvider, SQLProvider provider) {
       super();
-      this.setDataSource(dataSource);
-      this.setSqlProvider(provider);
-   }
-
-   public PostgresSequentialSequentialFileDriver(Connection connection, 
SQLProvider provider) {
-      super();
-      this.setConnection(connection);
+      this.setJdbcConnectionProvider(connectionProvider);
       this.setSqlProvider(provider);
    }
 
    @Override
-   protected void prepareStatements() throws SQLException {
-      this.largeObjectManager = new PostgresLargeObjectManager(connection);
-      this.deleteFile = 
connection.prepareStatement(sqlProvider.getDeleteFileSQL());
-      this.createFile = 
connection.prepareStatement(sqlProvider.getInsertFileSQL(), 
Statement.RETURN_GENERATED_KEYS);
-      this.selectFileByFileName = 
connection.prepareStatement(sqlProvider.getSelectFileByFileName());
-      this.copyFileRecord = 
connection.prepareStatement(sqlProvider.getCopyFileRecordByIdSQL());
-      this.renameFile = 
connection.prepareStatement(sqlProvider.getUpdateFileNameByIdSQL());
-      this.readLargeObject = 
connection.prepareStatement(sqlProvider.getReadLargeObjectSQL());
-      this.appendToLargeObject = 
connection.prepareStatement(sqlProvider.getAppendToLargeObjectSQL());
-      this.selectFileNamesByExtension = 
connection.prepareStatement(sqlProvider.getSelectFileNamesByExtensionSQL());
+   protected void prepareStatements() {
+      this.largeObjectManager = new PostgresLargeObjectManager();
+      this.deleteFile = sqlProvider.getDeleteFileSQL();
+      this.createFile = sqlProvider.getInsertFileSQL();
+      this.createFileAutogeneratedKeys = Statement.RETURN_GENERATED_KEYS;
+      this.selectFileByFileName = sqlProvider.getSelectFileByFileName();
+      this.copyFileRecord = sqlProvider.getCopyFileRecordByIdSQL();
+      this.renameFile = sqlProvider.getUpdateFileNameByIdSQL();
+      this.readLargeObject = sqlProvider.getReadLargeObjectSQL();
+      this.appendToLargeObject = sqlProvider.getAppendToLargeObjectSQL();
+      this.selectFileNamesByExtension = 
sqlProvider.getSelectFileNamesByExtensionSQL();
    }
 
    @Override
    public void createFile(JDBCSequentialFile file) throws SQLException {
-      synchronized (connection) {
-         try {
+      try (Connection connection = connectionProvider.getConnection()) {
+         try (PreparedStatement createFile = 
connection.prepareStatement(this.createFile)) {

Review comment:
      you need to pass Statement.RETURN_GENERATED_KEYS as the second argument 
(check line 55 of the previous code)

##########
File path: 
artemis-jdbc-store/src/main/java/org/apache/activemq/artemis/jdbc/store/file/PostgresSequentialSequentialFileDriver.java
##########
@@ -132,10 +129,10 @@ public int writeToFile(JDBCSequentialFile file, byte[] 
data, boolean append) thr
    public int readFromFile(JDBCSequentialFile file, ByteBuffer bytes) throws 
SQLException {
       Object largeObject = null;
       long oid = getOID(file);
-      synchronized (connection) {
+      try (Connection connection = connectionProvider.getConnection()) {
          try {
             connection.setAutoCommit(false);
-            largeObject = largeObjectManager.open(oid, 
PostgresLargeObjectManager.READ);
+            largeObject = largeObjectManager.open(connection, oid, 
PostgresLargeObjectManager.READ);

Review comment:
      We should declare the largeObject here instead of four lines earlier (even 
if that was how it was done before)

##########
File path: 
artemis-jdbc-store/src/main/java/org/apache/activemq/artemis/jdbc/store/file/PostgresSequentialSequentialFileDriver.java
##########
@@ -87,31 +82,33 @@ public void createFile(JDBCSequentialFile file) throws 
SQLException {
 
    @Override
    public void loadFile(JDBCSequentialFile file) throws SQLException {
-      synchronized (connection) {
-         connection.setAutoCommit(false);
-         readLargeObject.setLong(1, file.getId());
+      try (Connection connection = connectionProvider.getConnection()) {
+         try (PreparedStatement readLargeObject = 
connection.prepareStatement(this.readLargeObject)) {
+            connection.setAutoCommit(false);
+            readLargeObject.setLong(1, file.getId());
 
-         try (ResultSet rs = readLargeObject.executeQuery()) {
-            if (rs.next()) {
-               file.setWritePosition(getPostGresLargeObjectSize(file));
+            try (ResultSet rs = readLargeObject.executeQuery()) {
+               if (rs.next()) {
+                  file.setWritePosition(getPostGresLargeObjectSize(file));
+               }
+               connection.commit();
+            } catch (SQLException e) {
+               connection.rollback();
+               throw e;
             }
-            connection.commit();
-         } catch (SQLException e) {
-            connection.rollback();
-            throw e;
          }
       }
    }
 
    @Override
    public int writeToFile(JDBCSequentialFile file, byte[] data, boolean 
append) throws SQLException {
-      synchronized (connection) {
+      try (Connection connection = connectionProvider.getConnection()) {
          Object largeObject = null;
 
          Long oid = getOID(file);
          try {
             connection.setAutoCommit(false);
-            largeObject = largeObjectManager.open(oid, 
PostgresLargeObjectManager.WRITE);
+            largeObject = largeObjectManager.open(connection, oid, 
PostgresLargeObjectManager.WRITE);

Review comment:
      We should declare the largeObject here instead of four lines earlier (even 
if that was how it was done before)




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Issue Time Tracking
-------------------

    Worklog Id:     (was: 494035)
    Time Spent: 14h  (was: 13h 50m)

> Improve JDBC connection management
> ----------------------------------
>
>                 Key: ARTEMIS-2823
>                 URL: https://issues.apache.org/jira/browse/ARTEMIS-2823
>             Project: ActiveMQ Artemis
>          Issue Type: Improvement
>          Components: Broker
>            Reporter: Mikko
>            Priority: Major
>          Time Spent: 14h
>  Remaining Estimate: 0h
>
> I have a case where the whole clustering reliability and HA must rely on HA 
> capabilities of clustered database, and running on top of application server 
> is not an option.
> The current JDBC store implementation is rather bare bones on the connection 
> management side. JDBC driver is used directly with no management layer. At 
> startup, the broker just opens a couple of direct connections to the database 
> and expects them to be available forever. This is something that cannot be 
> expected in HA production environment. So, similarly to the discussion linked 
> below, in our case we lose the db connection after one hour, and all the 
> brokers need to be restarted to get new connections:
> [http://activemq.2283324.n4.nabble.com/Artemis-does-not-reconnect-to-MySQL-after-connection-timeout-td4751956.html]
>  
> This is something that could be resolved by simply using JDBC4 isValid 
> checks, but proper connection handling and pooling through datasource would 
> be preferable.
> I have implemented a solution for this by using DBCP2 datasource. Our test 
> cluster has been successfully running this forked version since the release 
> of Artemis 2.13.0. I will prepare a pull request if this is seen to be 
> something that can be useful.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Reply via email to