[ https://issues.apache.org/jira/browse/RANGER-4505?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17805900#comment-17805900 ]
Bhavik Patel commented on RANGER-4505: -------------------------------------- This issue is observed because the jackson-1.x dependency was removed in Hadoop 3.4: [https://github.com/apache/hadoop/commit/04b6b9a87bf024e82aa50328b4994aea4268d98e#diff-635a9ebcc70f58d8ca1c55b0d78cc90064a5081d189162a432271778da3cf018] From the Ranger end we also have to remove the jackson-1.x version, as it is vulnerable. > [hdfs] Error - java.lang.ClassNotFoundException: > org.codehaus.jackson.jaxrs.JacksonJsonProvider > ----------------------------------------------------------------------------------------------- > > Key: RANGER-4505 > URL: https://issues.apache.org/jira/browse/RANGER-4505 > Project: Ranger > Issue Type: Bug > Components: plugins > Affects Versions: 2.3.0, 2.4.0 > Reporter: Anatoly > Priority: Major > > ranger_version 2.3.0 > After upgrading the hadoop version from 3.1 to 3.3.6 and 3.3.5, I get an > error for hdfs - > java.lang.ClassNotFoundException: > org.codehaus.jackson.jaxrs.JacksonJsonProvider > > {code:java} > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,181 ERROR > namenode.NameNode: Failed to start namenode. 
> Oct 31 14:55:05 hadoop001 hdfs[1417563]: java.lang.NoClassDefFoundError: > org/codehaus/jackson/jaxrs/JacksonJsonProvider > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.util.RangerRESTClient.buildClient(RangerRESTClient.java:226) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.util.RangerRESTClient.getClient(RangerRESTClient.java:193) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.util.RangerRESTClient.get(RangerRESTClient.java:473) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.admin.client.RangerAdminRESTClient.getRangerRolesDownloadResponse(RangerAdminRESTClient.java:1340) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.admin.client.RangerAdminRESTClient.getRolesIfUpdatedWithCred(RangerAdminRESTClient.java:1202) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.admin.client.RangerAdminRESTClient.getRolesIfUpdated(RangerAdminRESTClient.java:167) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.util.RangerRolesProvider.loadUserGroupRolesFromAdmin(RangerRolesProvider.java:183) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.util.RangerRolesProvider.loadUserGroupRoles(RangerRolesProvider.java:123) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.util.PolicyRefresher.loadRoles(PolicyRefresher.java:495) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.util.PolicyRefresher.startRefresher(PolicyRefresher.java:144) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.service.RangerBasePlugin.init(RangerBasePlugin.java:243) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer.start(RangerHdfsAuthorizer.java:112) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer.start(RangerHdfsAuthorizer.java:86) > Oct 31 14:55:05 
hadoop001 hdfs[1417563]: at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startCommonServices(FSNamesystem.java:1321) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.startCommonServices(NameNode.java:883) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:804) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:1033) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:1008) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1782) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1847) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: Caused by: > java.lang.ClassNotFoundException: > org.codehaus.jackson.jaxrs.JacksonJsonProvider > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > java.base/java.lang.ClassLoader.findClass(ClassLoader.java:718) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.classloader.RangerPluginClassLoader$MyClassLoader.findClass(RangerPluginClassLoader.java:316) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:588) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:521) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.plugin.classloader.RangerPluginClassLoader.loadClass(RangerPluginClassLoader.java:160) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: ... 
20 more > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,198 INFO > util.ExitUtil: Exiting with status 1: java.lang.NoClassDefFoundError: > org/codehaus/jackson/jaxrs/JacksonJsonProvider > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,209 INFO > provider.AuditProviderFactory: ==> JVMShutdownHook.run() > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,209 INFO > provider.AuditProviderFactory: JVMShutdownHook: Signalling async audit > cleanup to start. > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,209 INFO > provider.AuditProviderFactory: JVMShutdownHook: Waiting up to 30 seconds for > audit cleanup to finish. > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,209 INFO > provider.AuditProviderFactory: RangerAsyncAuditCleanup: Starting cleanup > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,210 INFO > destination.HDFSAuditDestination: Flush called. > name=hdfs.async.multi_dest.batch.hdfs > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,211 INFO > utils.RangerJSONAuditWriter: Flush called. name=hdfs > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,211 INFO > queue.AuditAsyncQueue: Stop called. name=hdfs.async > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,211 INFO > queue.AuditAsyncQueue: Interrupting consumerThread. 
name=hdfs.async, > consumer=hdfs.async.multi_dest > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,211 INFO > provider.AuditProviderFactory: RangerAsyncAuditCleanup: Done cleanup > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,211 INFO > provider.AuditProviderFactory: RangerAsyncAuditCleanup: Waiting to audit > cleanup start signal > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,211 INFO > provider.AuditProviderFactory: JVMShutdownHook: Audit cleanup finished after > 2 milli seconds > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,211 INFO > provider.AuditProviderFactory: JVMShutdownHook: Interrupting ranger async > audit cleanup thread > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,212 INFO > provider.AuditProviderFactory: RangerAsyncAuditCleanup: Interrupted while > waiting for audit startCleanup signal! Exiting the thread... > Oct 31 14:55:05 hadoop001 hdfs[1417563]: java.lang.InterruptedException > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1040) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1345) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > java.base/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > org.apache.ranger.audit.provider.AuditProviderFactory$RangerAsyncAuditCleanup.run(AuditProviderFactory.java:503) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: at > java.base/java.lang.Thread.run(Thread.java:834) > Oct 31 14:55:05 hadoop001 hdfs[1417563]: 2023-10-31 14:55:05,211 INFO > queue.AuditAsyncQueue: Caught exception in consumer thread. 
Shutdown might be > in progress{code} > > > after I added the missing jars to ranger-hdfs-pluginimpl, I get another error > > {code:java} > ERROR namenode.NameNode: Failed to start namenode. > Oct 31 15:21:18 hadoop001 hdfs[1423028]: java.util.ServiceConfigurationError: > org.apache.hadoop.security.alias.CredentialProviderFactory: > org.apache.hadoop.security.alias.BouncyCastleFipsKeyStoreProvider$Factory not > a subtype > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > java.base/java.util.ServiceLoader.fail(ServiceLoader.java:589) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > java.base/java.util.ServiceLoader$LazyClassPathLookupIterator.hasNextService(ServiceLoader.java:1237) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > java.base/java.util.ServiceLoader$LazyClassPathLookupIterator.hasNext(ServiceLoader.java:1265) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > java.base/java.util.ServiceLoader$2.hasNext(ServiceLoader.java:1300) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > java.base/java.util.ServiceLoader$3.hasNext(ServiceLoader.java:1385) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.hadoop.security.alias.CredentialProviderFactory.<clinit>(CredentialProviderFactory.java:57) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.authorization.hadoop.utils.RangerCredentialProvider.getCredentialProviders(RangerCredentialProvider.java:69) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.authorization.hadoop.utils.RangerCredentialProvider.getCredentialString(RangerCredentialProvider.java:47) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.audit.provider.MiscUtil.getCredentialString(MiscUtil.java:455) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.audit.destination.SolrAuditDestination.getKeyManagers(SolrAuditDestination.java:348) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.audit.destination.SolrAuditDestination.connect(SolrAuditDestination.java:131) > Oct 31 
15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.audit.destination.SolrAuditDestination.init(SolrAuditDestination.java:96) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.audit.provider.AuditProviderFactory.init(AuditProviderFactory.java:183) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.plugin.service.RangerBasePlugin.init(RangerBasePlugin.java:232) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer.start(RangerHdfsAuthorizer.java:112) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer.start(RangerHdfsAuthorizer.java:86) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startCommonServices(FSNamesystem.java:1321) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.startCommonServices(NameNode.java:883) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:804) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:1033) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:1008) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1782) > Oct 31 15:21:18 hadoop001 hdfs[1423028]: at > org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1847){code} > -- This message was sent by Atlassian Jira (v8.20.10#820010)