Stack traces from a Hive TRUNCATE TABLE failure
The error reported in HiveServer2 is as follows:
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
)
at org.apache.hadoop.hive.ql.metadata.Hive.truncateTable(Hive.java:1066) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.truncateTable(DDLTask.java:5234) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.execute(DDLTask.java:623) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:205) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:2732) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:2403) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:2079) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1777) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1771) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:224) ~[hive-service-3.1.3.jar:3.1.3]
... 11 more
Caused by: org.apache.hadoop.hive.metastore.api.MetaException: Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result$truncate_table_resultStandardScheme.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result$truncate_table_resultStandardScheme.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:86) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.recv_truncate_table(ThriftHiveMetastore.java:1867) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.truncate_table(ThriftHiveMetastore.java:1852) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.truncateTable(HiveMetaStoreClient.java:1292) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.truncateTable(HiveMetaStoreClient.java:1286) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient.truncateTable(SessionHiveMetaStoreClient.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.invoke(RetryingMetaStoreClient.java:212) ~[hive-exec-3.1.3.jar:3.1.3]
at com.sun.proxy.$Proxy54.truncateTable(Unknown Source) ~[?:?]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient$SynchronizedHandler.invoke(HiveMetaStoreClient.java:2773) ~[hive-exec-3.1.3.jar:3.1.3]
at com.sun.proxy.$Proxy54.truncateTable(Unknown Source) ~[?:?]
at org.apache.hadoop.hive.ql.metadata.Hive.truncateTable(Hive.java:1064) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.truncateTable(DDLTask.java:5234) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.execute(DDLTask.java:623) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:205) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:2732) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:2403) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:2079) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1777) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1771) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:224) ~[hive-service-3.1.3.jar:3.1.3]
... 11 more
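The stack points to the root cause directly: the HDFS NameNode refuses to delete the table directory because the directory is snapshottable and still holds snapshots, so the delete that the metastore issues during TRUNCATE is rejected in FSDirSnapshotOp.checkSnapshot. A quick way to confirm this from the command line (a sketch; the path and cluster name come from the error message above, and the first command may need to run as the hdfs superuser to show all directories):

# List the snapshottable directories known to the NameNode
hdfs lsSnapshottableDir

# List the snapshots that currently exist under the table directory
hdfs dfs -ls hdfs://mycluster/warehouse/tablespace/managed/hive/db1.db/person/.snapshot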
The logs on the Hive Metastore (receiving) side:
FileUtils.java:moveToTrash(94)) - Failed to move to trash: hdfs://mycluster/warehouse/tablespace/managed/hive/db1.db/person; Force to delete it.
2023-07-10T11:25:58,684 ERROR [pool-11-thread-184]: utils.MetaStoreUtils (MetaStoreUtils.java:logAndThrowMetaException(166)) - Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
org.apache.hadoop.ipc.RemoteException: The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1579) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.ipc.Client.call(Client.java:1525) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.ipc.Client.call(Client.java:1422) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:231) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:118) ~[hadoop-common-3.2.3.jar:?]
at com.sun.proxy.$Proxy37.delete(Unknown Source) ~[?:?]
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.delete(ClientNamenodeProtocolTranslatorPB.java:644) ~[hadoop-hdfs-client-3.2.3.jar:?]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359) ~[hadoop-common-3.2.3.jar:?]
at com.sun.proxy.$Proxy38.delete(Unknown Source) ~[?:?]
at org.apache.hadoop.hdfs.DFSClient.delete(DFSClient.java:1658) ~[hadoop-hdfs-client-3.2.3.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$19.doCall(DistributedFileSystem.java:979) ~[hadoop-hdfs-client-3.2.3.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$19.doCall(DistributedFileSystem.java:976) ~[hadoop-hdfs-client-3.2.3.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.delete(DistributedFileSystem.java:986) ~[hadoop-hdfs-client-3.2.3.jar:?]
at org.apache.hadoop.hive.metastore.utils.FileUtils.moveToTrash(FileUtils.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl.deleteDir(HiveMetaStoreFsImpl.java:41) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:374) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:362) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.truncate_table(HiveMetaStore.java:2841) ~[hive-exec-3.1.3.jar:3.1.3]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108) ~[hive-exec-3.1.3.jar:3.1.3]
at com.sun.proxy.$Proxy31.truncate_table(Unknown Source) ~[?:?]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15094) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15078) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631) ~[hive-exec-3.1.3.jar:3.1.3]
at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0-internal]
at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0-internal]
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286) ~[hive-exec-3.1.3.jar:3.1.3]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0-internal]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0-internal]
at java.lang.Thread.run(Thread.java:748) ~[?:1.8.0-internal]
2023-07-10T11:25:58,685 ERROR [pool-11-thread-184]: utils.MetaStoreUtils (MetaStoreUtils.java:logAndThrowMetaException(167)) - Converting exception to MetaException
2023-07-10T11:25:58,686 ERROR [pool-11-thread-184]: metastore.RetryingHMSHandler (RetryingHMSHandler.java:invokeInternal(201)) - MetaException(message:Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
)
at org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.logAndThrowMetaException(MetaStoreUtils.java:168)
at org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl.deleteDir(HiveMetaStoreFsImpl.java:51)
at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:374)
at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:362)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.truncate_table(HiveMetaStore.java:2841)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108)
at com.sun.proxy.$Proxy31.truncate_table(Unknown Source)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15094)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15078)
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
2023-07-10T11:26:00,848 INFO [pool-6-thread-10]: txn.AcidOpenTxnsCounterService (AcidOpenTxnsCounterService.java:run(51)) - AcidOpenTxnsCounterService ran for 0 seconds. isAliveCounter = 173463
2023-07-10T11:26:18,497 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_database: @hive#db1
2023-07-10T11:26:18,497 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01 ip=10.121.65.207 cmd=get_database: @hive#db1
2023-07-10T11:26:18,520 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_database: @hive#db1
2023-07-10T11:26:18,520 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01 ip=10.121.65.207 cmd=get_database: @hive#db1
2023-07-10T11:26:18,533 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_tables: db=@hive#db1 pat=.*
2023-07-10T11:26:18,533 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01 ip=10.121.65.207 cmd=get_tables: db=@hive#db1 pat=.*
2023-07-10T11:26:21,118 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_table : tbl=hive.db1.person
2023-07-10T11:26:21,118 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01 ip=10.121.65.207 cmd=get_table : tbl=hive.db1.person
2023-07-10T11:26:21,142 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_table : tbl=hive.db1.person
2023-07-10T11:26:21,142 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01 ip=10.121.65.207 cmd=get_table : tbl=hive.db1.person
2023-07-10T11:26:21,189 WARN [pool-11-thread-184]: utils.FileUtils (FileUtils.java:moveToTrash(94)) - Failed to move to trash: hdfs://mycluster/warehouse/tablespace/managed/hive/db1.db/person; Force to delete it.
2023-07-10T11:26:21,194 ERROR [pool-11-thread-184]: utils.MetaStoreUtils (MetaStoreUtils.java:logAndThrowMetaException(166)) - Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
org.apache.hadoop.ipc.RemoteException: The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1579) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.ipc.Client.call(Client.java:1525) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.ipc.Client.call(Client.java:1422) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:231) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:118) ~[hadoop-common-3.2.3.jar:?]
at com.sun.proxy.$Proxy37.delete(Unknown Source) ~[?:?]
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.delete(ClientNamenodeProtocolTranslatorPB.java:644) ~[hadoop-hdfs-client-3.2.3.jar:?]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359) ~[hadoop-common-3.2.3.jar:?]
at com.sun.proxy.$Proxy38.delete(Unknown Source) ~[?:?]
at org.apache.hadoop.hdfs.DFSClient.delete(DFSClient.java:1658) ~[hadoop-hdfs-client-3.2.3.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$19.doCall(DistributedFileSystem.java:979) ~[hadoop-hdfs-client-3.2.3.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$19.doCall(DistributedFileSystem.java:976) ~[hadoop-hdfs-client-3.2.3.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.delete(DistributedFileSystem.java:986) ~[hadoop-hdfs-client-3.2.3.jar:?]
at org.apache.hadoop.hive.metastore.utils.FileUtils.moveToTrash(FileUtils.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl.deleteDir(HiveMetaStoreFsImpl.java:41) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:374) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:362) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.truncate_table(HiveMetaStore.java:2841) ~[hive-exec-3.1.3.jar:3.1.3]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108) ~[hive-exec-3.1.3.jar:3.1.3]
at com.sun.proxy.$Proxy31.truncate_table(Unknown Source) ~[?:?]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15094) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15078) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631) ~[hive-exec-3.1.3.jar:3.1.3]
at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0-internal]
at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0-internal]
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286) ~[hive-exec-3.1.3.jar:3.1.3]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0-internal]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0-internal]
at java.lang.Thread.run(Thread.java:748) ~[?:1.8.0-internal]
2023-07-10T11:26:21,194 ERROR [pool-11-thread-184]: utils.MetaStoreUtils (MetaStoreUtils.java:logAndThrowMetaException(167)) - Converting exception to MetaException
2023-07-10T11:26:21,195 ERROR [pool-11-thread-184]: metastore.RetryingHMSHandler (RetryingHMSHandler.java:invokeInternal(201)) - MetaException(message:Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
)
at org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.logAndThrowMetaException(MetaStoreUtils.java:168)
at org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl.deleteDir(HiveMetaStoreFsImpl.java:51)
at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:374)
at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:362)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.truncate_table(HiveMetaStore.java:2841)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108)
at com.sun.proxy.$Proxy31.truncate_table(Unknown Source)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15094)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15078)
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
2023-07-10T11:26:38,518 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_database: @hive#db1
2023-07-10T11:26:38,518 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01 ip=10.121.65.207 cmd=get_database: @hive#db1
2023-07-10T11:26:38,545 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: create_table: Table(tableName:aaa, dbName:db1, owner:user01, createTime:1688959598, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.ql.io.orc.OrcSerde, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{}), storedAsSubDirectories:false), partitionKeys:[], parameters:{totalSize=0, numRows=0, rawDataSize=0, COLUMN_STATS_ACCURATE={"BASIC_STATS":"true","COLUMN_STATS":{"id":"true"}}, numFiles=0, bucketing_version=2}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE, privileges:PrincipalPrivilegeSet(userPrivileges:{}, groupPrivileges:null, rolePrivileges:null), temporary:false, catName:hive, ownerType:USER)
2023-07-10T11:26:38,546 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01 ip=10.121.65.207 cmd=create_table: Table(tableName:aaa, dbName:db1, owner:user01, createTime:1688959598, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.ql.io.orc.OrcSerde, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{}), storedAsSubDirectories:false), partitionKeys:[], parameters:{totalSize=0, numRows=0, rawDataSize=0, COLUMN_STATS_ACCURATE={"BASIC_STATS":"true","COLUMN_STATS":{"id":"true"}}, numFiles=0, bucketing_version=2}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE, privileges:PrincipalPrivilegeSet(userPrivileges:{}, groupPrivileges:null, rolePrivileges:null), temporary:false, catName:hive, ownerType:USER)
2023-07-10T11:26:38,565 INFO [pool-11-thread-184]: utils.FileUtils (FileUtils.java:mkdir(167)) - Creating directory if it doesn't exist: hdfs://mycluster/warehouse/tablespace/managed/hive/db1.db/aaa
2023-07-10T11:26:46,476 INFO [pool-11-thread-3]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 2: get_config_value: name=metastore.batch.retrieve.max defaultValue=50
2023-07-10T11:26:46,476 INFO [pool-11-thread-3]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=hive/lyh206.hde.com@LYHHAKRB206.COM ip=10.121.65.206 cmd=get_config_value: name=metastore.batch.retrieve.max defaultValue=50
2023-07-10T11:26:49,029 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_table : tbl=hive.db1.aaa
2023-07-10T11:26:49,029 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01 ip=10.121.65.207 cmd=get_table : tbl=hive.db1.aaa
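Note from the metastore log that TRUNCATE first tries to move the directory to trash, then falls back to a force delete, and it is this force delete that the snapshot check rejects. To let TRUNCATE TABLE succeed again, the snapshots under the table directory have to be removed first; once no snapshots remain, the directory can also be made non-snapshottable (disallowSnapshot itself fails while snapshots still exist, so the order matters). A minimal sketch, where the snapshot name s0 is a placeholder for whatever the .snapshot listing actually shows:

# Delete every snapshot reported under .snapshot ("s0" is a placeholder name)
hdfs dfs -deleteSnapshot /warehouse/tablespace/managed/hive/db1.db/person s0

# Optional: forbid further snapshots on the directory once it holds none
hdfs dfsadmin -disallowSnapshot /warehouse/tablespace/managed/hive/db1.db/person

After that, re-running TRUNCATE TABLE person should no longer trip the checkSnapshot guard in FSDirSnapshotOp.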
The detailed HiveServer2 log for the failed statement is as follows:
2023-07-10T11:25:58,464 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: hooks.ATSHook (ATSHook.java:<init>(146)) - Created ATS Hook
2023-07-10T11:25:58,464 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: hooks.ATSHook (ATSHook.java:<init>(146)) - Created ATS Hook
2023-07-10T11:25:58,465 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: ql.Driver (Driver.java:checkConcurrency(288)) - Concurrency mode is disabled, not creating a lock manager
2023-07-10T11:25:58,503 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: ql.Driver (Driver.java:compile(669)) - Semantic Analysis Completed (retrial = false)
2023-07-10T11:25:58,504 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: ql.Driver (Driver.java:getSchema(377)) - Returning Hive schema: Schema(fieldSchemas:null, properties:null)
2023-07-10T11:25:58,504 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: ql.Driver (Driver.java:compile(784)) - Completed compiling command(queryId=hive_20230710112558_57417104-6ec3-4c95-84cb-8ea71d8f263f); Time taken: 0.059 seconds
2023-07-10T11:25:58,505 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,505 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:resetThreadName(452)) - Resetting thread name to HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:25:58,508 INFO [HiveServer2-Background-Pool: Thread-9597]: reexec.ReExecDriver (ReExecDriver.java:run(156)) - Execution #1 of query
2023-07-10T11:25:58,508 INFO [HiveServer2-Background-Pool: Thread-9597]: ql.Driver (Driver.java:checkConcurrency(288)) - Concurrency mode is disabled, not creating a lock manager
2023-07-10T11:25:58,508 INFO [HiveServer2-Background-Pool: Thread-9597]: ql.Driver (Driver.java:execute(2323)) - Executing command(queryId=hive_20230710112558_57417104-6ec3-4c95-84cb-8ea71d8f263f): TRUNCATE TABLE person
2023-07-10T11:25:58,510 INFO [HiveServer2-Background-Pool: Thread-9597]: ql.Driver (Driver.java:launchTask(2730)) - Starting task [Stage-0:DDL] in serial mode
2023-07-10T11:25:58,694 ERROR [HiveServer2-Background-Pool: Thread-9597]: exec.DDLTask (DDLTask.java:failed(927)) - Failed
org.apache.hadoop.hive.ql.metadata.HiveException: Exception while processing
at org.apache.hadoop.hive.ql.exec.DDLTask.truncateTable(DDLTask.java:5236) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.execute(DDLTask.java:623) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:205) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:2732) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:2403) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:2079) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1777) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1771) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:224) ~[hive-service-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.access$700(SQLOperation.java:87) ~[hive-service-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork$1.run(SQLOperation.java:316) ~[hive-service-3.1.3.jar:3.1.3]
at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0-internal]
at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0-internal]
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork.run(SQLOperation.java:329) ~[hive-service-3.1.3.jar:3.1.3]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ~[?:1.8.0-internal]
at java.util.concurrent.FutureTask.run(FutureTask.java:266) ~[?:1.8.0-internal]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0-internal]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0-internal]
at java.lang.Thread.run(Thread.java:748) ~[?:1.8.0-internal]
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
)
at org.apache.hadoop.hive.ql.metadata.Hive.truncateTable(Hive.java:1066) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.truncateTable(DDLTask.java:5234) ~[hive-exec-3.1.3.jar:3.1.3]
... 21 more
Caused by: org.apache.hadoop.hive.metastore.api.MetaException: Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result$truncate_table_resultStandardScheme.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result$truncate_table_resultStandardScheme.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:86) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.recv_truncate_table(ThriftHiveMetastore.java:1867) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.truncate_table(ThriftHiveMetastore.java:1852) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.truncateTable(HiveMetaStoreClient.java:1292) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.truncateTable(HiveMetaStoreClient.java:1286) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient.truncateTable(SessionHiveMetaStoreClient.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.invoke(RetryingMetaStoreClient.java:212) ~[hive-exec-3.1.3.jar:3.1.3]
at com.sun.proxy.$Proxy54.truncateTable(Unknown Source) ~[?:?]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient$SynchronizedHandler.invoke(HiveMetaStoreClient.java:2773) ~[hive-exec-3.1.3.jar:3.1.3]
at com.sun.proxy.$Proxy54.truncateTable(Unknown Source) ~[?:?]
at org.apache.hadoop.hive.ql.metadata.Hive.truncateTable(Hive.java:1064) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.truncateTable(DDLTask.java:5234) ~[hive-exec-3.1.3.jar:3.1.3]
... 21 more
2023-07-10T11:25:58,695 INFO [HiveServer2-Background-Pool: Thread-9597]: reexec.ReOptimizePlugin (ReOptimizePlugin.java:run(70)) - ReOptimization: retryPossible: false
2023-07-10T11:25:58,696 ERROR [HiveServer2-Background-Pool: Thread-9597]: ql.Driver (SessionState.java:printError(1250)) - FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Exception while processing
2023-07-10T11:25:58,696 INFO [HiveServer2-Background-Pool: Thread-9597]: ql.Driver (Driver.java:execute(2599)) - Completed executing command(queryId=hive_20230710112558_57417104-6ec3-4c95-84cb-8ea71d8f263f); Time taken: 0.188 seconds
2023-07-10T11:25:58,696 INFO [HiveServer2-Background-Pool: Thread-9597]: ql.Driver (Driver.java:checkConcurrency(288)) - Concurrency mode is disabled, not creating a lock manager
2023-07-10T11:25:58,703 ERROR [HiveServer2-Background-Pool: Thread-9597]: operation.Operation (SQLOperation.java:run(320)) - Error running hive query:
org.apache.hive.service.cli.HiveSQLException: Error while processing statement: FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Exception while processing
at org.apache.hive.service.cli.operation.Operation.toSQLException(Operation.java:335) ~[hive-service-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:226) ~[hive-service-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.access$700(SQLOperation.java:87) ~[hive-service-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork$1.run(SQLOperation.java:316) ~[hive-service-3.1.3.jar:3.1.3]
at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0-internal]
at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0-internal]
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) ~[hadoop-common-3.2.3.jar:?]
at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork.run(SQLOperation.java:329) ~[hive-service-3.1.3.jar:3.1.3]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ~[?:1.8.0-internal]
at java.util.concurrent.FutureTask.run(FutureTask.java:266) ~[?:1.8.0-internal]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0-internal]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0-internal]
at java.lang.Thread.run(Thread.java:748) ~[?:1.8.0-internal]
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Exception while processing
at org.apache.hadoop.hive.ql.exec.DDLTask.truncateTable(DDLTask.java:5236) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.execute(DDLTask.java:623) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:205) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:2732) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:2403) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:2079) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1777) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1771) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:224) ~[hive-service-3.1.3.jar:3.1.3]
... 11 more
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
)
at org.apache.hadoop.hive.ql.metadata.Hive.truncateTable(Hive.java:1066) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.truncateTable(DDLTask.java:5234) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.execute(DDLTask.java:623) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:205) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:2732) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:2403) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:2079) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1777) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1771) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:224) ~[hive-service-3.1.3.jar:3.1.3]
... 11 more
Caused by: org.apache.hadoop.hive.metastore.api.MetaException: Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result$truncate_table_resultStandardScheme.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result$truncate_table_resultStandardScheme.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$truncate_table_result.read(ThriftHiveMetastore.java) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:86) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.recv_truncate_table(ThriftHiveMetastore.java:1867) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.truncate_table(ThriftHiveMetastore.java:1852) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.truncateTable(HiveMetaStoreClient.java:1292) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.truncateTable(HiveMetaStoreClient.java:1286) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient.truncateTable(SessionHiveMetaStoreClient.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.invoke(RetryingMetaStoreClient.java:212) ~[hive-exec-3.1.3.jar:3.1.3]
at com.sun.proxy.$Proxy54.truncateTable(Unknown Source) ~[?:?]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient$SynchronizedHandler.invoke(HiveMetaStoreClient.java:2773) ~[hive-exec-3.1.3.jar:3.1.3]
at com.sun.proxy.$Proxy54.truncateTable(Unknown Source) ~[?:?]
at org.apache.hadoop.hive.ql.metadata.Hive.truncateTable(Hive.java:1064) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.truncateTable(DDLTask.java:5234) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.DDLTask.execute(DDLTask.java:623) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:205) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:2732) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:2403) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:2079) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1777) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1771) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:157) ~[hive-exec-3.1.3.jar:3.1.3]
at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:224) ~[hive-service-3.1.3.jar:3.1.3]
... 11 more
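
From this stack the cause is already visible: TRUNCATE TABLE makes the metastore delete the table directory, and the HDFS NameNode rejects the delete because /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and still has snapshots. The snapshots blocking the delete can be listed directly under the directory's .snapshot path (path taken from the error message; snapshot names depend on the cluster):

hdfs dfs -ls /warehouse/tablespace/managed/hive/db1.db/person/.snapshot
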
2023-07-10T11:25:58,719 INFO [HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,720 INFO [HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:updateThreadName(441)) - Updating thread name to 2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:25:58,720 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,720 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:resetThreadName(452)) - Resetting thread name to HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:25:58,721 INFO [HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,721 INFO [HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:updateThreadName(441)) - Updating thread name to 2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:25:58,721 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,721 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:resetThreadName(452)) - Resetting thread name to HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:25:58,722 INFO [HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,722 INFO [HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:updateThreadName(441)) - Updating thread name to 2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:25:58,722 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,722 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:resetThreadName(452)) - Resetting thread name to HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:25:58,724 INFO [HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,724 INFO [HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:updateThreadName(441)) - Updating thread name to 2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:25:58,725 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: operation.OperationManager (OperationManager.java:closeOperation(286)) - Closing operation: OperationHandle [opType=EXECUTE_STATEMENT, getHandleIdentifier()=9fa6df68-a350-40c2-96ee-c1088d9f4599]
2023-07-10T11:25:58,725 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: operation.OperationManager (OperationManager.java:removeOperation(208)) - Removed queryId: hive_20230710112558_57417104-6ec3-4c95-84cb-8ea71d8f263f corresponding to operation: OperationHandle [opType=EXECUTE_STATEMENT, getHandleIdentifier()=9fa6df68-a350-40c2-96ee-c1088d9f4599]
2023-07-10T11:25:58,725 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: operation.Operation (Operation.java:cleanupOperationLog(289)) - Closing operation log /tmp/hive/operation_logs/2ad8252e-52f9-4447-9902-ef953a80c25a/hive_20230710112558_57417104-6ec3-4c95-84cb-8ea71d8f263f without delay
2023-07-10T11:25:58,725 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:25:58,725 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:resetThreadName(452)) - Resetting thread name to HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:26:18,476 INFO [HiveServer2-Handler-Pool: Thread-98]: conf.HiveConf (HiveConf.java:getLogIdVar(5043)) - Using the default value passed in for log id: 2ad8252e-52f9-4447-9902-ef953a80c25a
2023-07-10T11:26:18,476 INFO [HiveServer2-Handler-Pool: Thread-98]: session.SessionState (SessionState.java:updateThreadName(441)) - Updating thread name to 2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98
2023-07-10T11:26:18,477 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: operation.OperationManager (OperationManager.java:addOperation(193)) - Adding operation: OperationHandle [opType=EXECUTE_STATEMENT, getHandleIdentifier()=21b4851f-ef1d-40e4-bae5-8e6518495228]
2023-07-10T11:26:18,477 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: ql.Driver (Driver.java:compile(557)) - Compiling command(queryId=hive_20230710112618_336e604e-8b99-4d1c-af64-0f6cb54eba18): show tables
2023-07-10T11:26:18,495 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: hooks.ATSHook (ATSHook.java:<init>(146)) - Created ATS Hook
2023-07-10T11:26:18,495 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: hooks.ATSHook (ATSHook.java:<init>(146)) - Created ATS Hook
2023-07-10T11:26:18,495 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: hooks.ATSHook (ATSHook.java:<init>(146)) - Created ATS Hook
2023-07-10T11:26:18,496 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: ql.Driver (Driver.java:checkConcurrency(288)) - Concurrency mode is disabled, not creating a lock manager
2023-07-10T11:26:18,512 INFO [2ad8252e-52f9-4447-9902-ef953a80c25a HiveServer2-Handler-Pool: Thread-98]: ql.Driver (Driver.java:compile(669)) - Semantic Analysis Completed (retrial = false)
The detailed log on the metastore side is as follows:
FileUtils.java:moveToTrash(94)) - Failed to move to trash: hdfs://mycluster/warehouse/tablespace/managed/hive/db1.db/person; Force to delete it.
2023-07-10T11:25:58,684 ERROR [pool-11-thread-184]: utils.MetaStoreUtils (MetaStoreUtils.java:logAndThrowMetaException(166)) - Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
org.apache.hadoop.ipc.RemoteException: The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
        at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1579) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.ipc.Client.call(Client.java:1525) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.ipc.Client.call(Client.java:1422) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:231) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:118) ~[hadoop-common-3.2.3.jar:?]
        at com.sun.proxy.$Proxy37.delete(Unknown Source) ~[?:?]
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.delete(ClientNamenodeProtocolTranslatorPB.java:644) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359) ~[hadoop-common-3.2.3.jar:?]
        at com.sun.proxy.$Proxy38.delete(Unknown Source) ~[?:?]
        at org.apache.hadoop.hdfs.DFSClient.delete(DFSClient.java:1658) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at org.apache.hadoop.hdfs.DistributedFileSystem$19.doCall(DistributedFileSystem.java:979) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at org.apache.hadoop.hdfs.DistributedFileSystem$19.doCall(DistributedFileSystem.java:976) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.hdfs.DistributedFileSystem.delete(DistributedFileSystem.java:986) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at org.apache.hadoop.hive.metastore.utils.FileUtils.moveToTrash(FileUtils.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl.deleteDir(HiveMetaStoreFsImpl.java:41) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:374) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:362) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.truncate_table(HiveMetaStore.java:2841) ~[hive-exec-3.1.3.jar:3.1.3]
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108) ~[hive-exec-3.1.3.jar:3.1.3]
        at com.sun.proxy.$Proxy31.truncate_table(Unknown Source) ~[?:?]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15094) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15078) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631) ~[hive-exec-3.1.3.jar:3.1.3]
        at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0-internal]
        at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0-internal]
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286) ~[hive-exec-3.1.3.jar:3.1.3]
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0-internal]
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0-internal]
        at java.lang.Thread.run(Thread.java:748) ~[?:1.8.0-internal]
2023-07-10T11:25:58,685 ERROR [pool-11-thread-184]: utils.MetaStoreUtils (MetaStoreUtils.java:logAndThrowMetaException(167)) - Converting exception to MetaException
2023-07-10T11:25:58,686 ERROR [pool-11-thread-184]: metastore.RetryingHMSHandler (RetryingHMSHandler.java:invokeInternal(201)) - MetaException(message:Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957))
        at org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.logAndThrowMetaException(MetaStoreUtils.java:168)
        at org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl.deleteDir(HiveMetaStoreFsImpl.java:51)
        at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:374)
        at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:362)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.truncate_table(HiveMetaStore.java:2841)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108)
        at com.sun.proxy.$Proxy31.truncate_table(Unknown Source)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15094)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15078)
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
        at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636)
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631)
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
2023-07-10T11:26:00,848 INFO [pool-6-thread-10]: txn.AcidOpenTxnsCounterService (AcidOpenTxnsCounterService.java:run(51)) - AcidOpenTxnsCounterService ran for 0 seconds. isAliveCounter = 173463
2023-07-10T11:26:18,497 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_database: @hive#db1
2023-07-10T11:26:18,497 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01	ip=10.121.65.207	cmd=get_database: @hive#db1
2023-07-10T11:26:18,520 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_database: @hive#db1
2023-07-10T11:26:18,520 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01	ip=10.121.65.207	cmd=get_database: @hive#db1
2023-07-10T11:26:18,533 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_tables: db=@hive#db1 pat=.*
2023-07-10T11:26:18,533 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01	ip=10.121.65.207	cmd=get_tables: db=@hive#db1 pat=.*
2023-07-10T11:26:21,118 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_table : tbl=hive.db1.person
2023-07-10T11:26:21,118 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01	ip=10.121.65.207	cmd=get_table : tbl=hive.db1.person
2023-07-10T11:26:21,142 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_table : tbl=hive.db1.person
2023-07-10T11:26:21,142 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01	ip=10.121.65.207	cmd=get_table : tbl=hive.db1.person
2023-07-10T11:26:21,189 WARN [pool-11-thread-184]: utils.FileUtils (FileUtils.java:moveToTrash(94)) - Failed to move to trash: hdfs://mycluster/warehouse/tablespace/managed/hive/db1.db/person; Force to delete it.
2023-07-10T11:26:21,194 ERROR [pool-11-thread-184]: utils.MetaStoreUtils (MetaStoreUtils.java:logAndThrowMetaException(166)) - Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
org.apache.hadoop.ipc.RemoteException: The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957)
        at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1579) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.ipc.Client.call(Client.java:1525) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.ipc.Client.call(Client.java:1422) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:231) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:118) ~[hadoop-common-3.2.3.jar:?]
        at com.sun.proxy.$Proxy37.delete(Unknown Source) ~[?:?]
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.delete(ClientNamenodeProtocolTranslatorPB.java:644) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359) ~[hadoop-common-3.2.3.jar:?]
        at com.sun.proxy.$Proxy38.delete(Unknown Source) ~[?:?]
        at org.apache.hadoop.hdfs.DFSClient.delete(DFSClient.java:1658) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at org.apache.hadoop.hdfs.DistributedFileSystem$19.doCall(DistributedFileSystem.java:979) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at org.apache.hadoop.hdfs.DistributedFileSystem$19.doCall(DistributedFileSystem.java:976) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.hdfs.DistributedFileSystem.delete(DistributedFileSystem.java:986) ~[hadoop-hdfs-client-3.2.3.jar:?]
        at org.apache.hadoop.hive.metastore.utils.FileUtils.moveToTrash(FileUtils.java:97) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl.deleteDir(HiveMetaStoreFsImpl.java:41) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:374) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:362) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.truncate_table(HiveMetaStore.java:2841) ~[hive-exec-3.1.3.jar:3.1.3]
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0-internal]
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0-internal]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0-internal]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0-internal]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108) ~[hive-exec-3.1.3.jar:3.1.3]
        at com.sun.proxy.$Proxy31.truncate_table(Unknown Source) ~[?:?]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15094) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15078) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631) ~[hive-exec-3.1.3.jar:3.1.3]
        at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0-internal]
        at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0-internal]
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762) ~[hadoop-common-3.2.3.jar:?]
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631) ~[hive-exec-3.1.3.jar:3.1.3]
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286) ~[hive-exec-3.1.3.jar:3.1.3]
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0-internal]
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0-internal]
        at java.lang.Thread.run(Thread.java:748) ~[?:1.8.0-internal]
2023-07-10T11:26:21,194 ERROR [pool-11-thread-184]: utils.MetaStoreUtils (MetaStoreUtils.java:logAndThrowMetaException(167)) - Converting exception to MetaException
2023-07-10T11:26:21,195 ERROR [pool-11-thread-184]: metastore.RetryingHMSHandler (RetryingHMSHandler.java:invokeInternal(201)) - MetaException(message:Got exception: org.apache.hadoop.ipc.RemoteException The directory /warehouse/tablespace/managed/hive/db1.db/person cannot be deleted since /warehouse/tablespace/managed/hive/db1.db/person is snapshottable and already has snapshots
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:297)
        at org.apache.hadoop.hdfs.server.namenode.FSDirSnapshotOp.checkSnapshot(FSDirSnapshotOp.java:327)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:63)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.deleteInternal(FSDirDeleteOp.java:190)
        at org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.delete(FSDirDeleteOp.java:121)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:3223)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.delete(NameNodeRpcServer.java:1132)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.delete(ClientNamenodeProtocolServerSideTranslatorPB.java:708)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:549)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:518)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1086)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1029)
        at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:957)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2957))
        at org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.logAndThrowMetaException(MetaStoreUtils.java:168)
        at org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl.deleteDir(HiveMetaStoreFsImpl.java:51)
        at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:374)
        at org.apache.hadoop.hive.metastore.Warehouse.deleteDir(Warehouse.java:362)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.truncate_table(HiveMetaStore.java:2841)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108)
        at com.sun.proxy.$Proxy31.truncate_table(Unknown Source)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15094)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$truncate_table.getResult(ThriftHiveMetastore.java:15078)
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
        at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636)
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1762)
        at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631)
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
2023-07-10T11:26:38,518 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_database: @hive#db1
2023-07-10T11:26:38,518 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01	ip=10.121.65.207	cmd=get_database: @hive#db1
2023-07-10T11:26:38,545 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: create_table: Table(tableName:aaa, dbName:db1, owner:user01, createTime:1688959598, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.ql.io.orc.OrcSerde, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{}), storedAsSubDirectories:false), partitionKeys:[], parameters:{totalSize=0, numRows=0, rawDataSize=0, COLUMN_STATS_ACCURATE={"BASIC_STATS":"true","COLUMN_STATS":{"id":"true"}}, numFiles=0, bucketing_version=2}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE, privileges:PrincipalPrivilegeSet(userPrivileges:{}, groupPrivileges:null, rolePrivileges:null), temporary:false, catName:hive, ownerType:USER)
2023-07-10T11:26:38,546 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01	ip=10.121.65.207	cmd=create_table: Table(tableName:aaa, dbName:db1, owner:user01, createTime:1688959598, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.ql.io.orc.OrcSerde, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{}), storedAsSubDirectories:false), partitionKeys:[], parameters:{totalSize=0, numRows=0, rawDataSize=0, COLUMN_STATS_ACCURATE={"BASIC_STATS":"true","COLUMN_STATS":{"id":"true"}}, numFiles=0, bucketing_version=2}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE, privileges:PrincipalPrivilegeSet(userPrivileges:{}, groupPrivileges:null, rolePrivileges:null), temporary:false, catName:hive, ownerType:USER)
2023-07-10T11:26:38,565 INFO [pool-11-thread-184]: utils.FileUtils (FileUtils.java:mkdir(167)) - Creating directory if it doesn't exist: hdfs://mycluster/warehouse/tablespace/managed/hive/db1.db/aaa
2023-07-10T11:26:46,476 INFO [pool-11-thread-3]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 2: get_config_value: name=metastore.batch.retrieve.max defaultValue=50
2023-07-10T11:26:46,476 INFO [pool-11-thread-3]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=hive/lyh206.hde.com@LYHHAKRB206.COM	ip=10.121.65.206	cmd=get_config_value: name=metastore.batch.retrieve.max defaultValue=50
2023-07-10T11:26:49,029 INFO [pool-11-thread-184]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(897)) - 155: get_table : tbl=hive.db1.aaa
2023-07-10T11:26:49,029 INFO [pool-11-thread-184]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(349)) - ugi=user01	ip=10.121.65.207	cmd=get_table : tbl=hive.db1.aaa
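
The metastore log confirms the flow: truncate_table calls Warehouse.deleteDir, the move to trash fails, the forced fs.delete is rejected by the NameNode, and the RemoteException is converted into the MetaException that HiveServer2 reports. To let TRUNCATE TABLE succeed, the snapshots under the table directory must be removed first; once the last snapshot is gone, the directory can also be made non-snapshottable so the problem does not recur. A minimal sequence (the snapshot name s20230701 is only an example; use the names shown by the .snapshot listing, and note these commands typically require HDFS admin privileges):

hdfs lsSnapshottableDir
hdfs dfs -deleteSnapshot /warehouse/tablespace/managed/hive/db1.db/person s20230701
hdfs dfsadmin -disallowSnapshot /warehouse/tablespace/managed/hive/db1.db/person

disallowSnapshot fails while any snapshot still exists, so delete all snapshots first; after that, TRUNCATE TABLE (and DROP TABLE) on db1.person work again.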