public class org.apache.hadoop.hive.druid.TestDruidStorageHandler extends java.lang.Object
{
// Test fixture state. (Jimple/Soot IR of a decompiled JUnit test class; method
// bodies below have all locals collapsed to `v` by the decompiler.)
// JUnit rule — presumably @Rule: embedded Derby-backed Druid metadata store per test. TODO confirm annotation.
public final org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule;
// JUnit rule — presumably @Rule: scratch directory; explicitly deleted in tearDown().
public final org.junit.rules.TemporaryFolder temporaryFolder;
// Constants; their initializers are not visible in this IR view.
private static final java.lang.String DB_NAME;
private static final java.lang.String TABLE_NAME;
private static final java.lang.String DATA_SOURCE_NAME;
// Name of the Druid segments metadata table, resolved from the Derby rule in before().
private java.lang.String segmentsTable;
// Absolute path of the per-test working directory, resolved in before().
private java.lang.String tableWorkingPath;
// Hadoop configuration built in before() and handed to the storage handler.
private org.apache.hadoop.conf.Configuration config;
// Object under test; wired to the Derby connector in before().
private org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler;
// Mockito mock of a Hive metastore Table, stubbed in before().
private final org.apache.hadoop.hive.metastore.api.Table tableMock;
// Constructor: allocates the two JUnit rule objects and the metastore Table mock.
// NOTE(review): the decompiler renamed every local to `v`; the same identifier
// refers to different values at different points below — read in statement order.
public void <init>()
{
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v;
java.lang.Object v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.junit.rules.TemporaryFolder v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// super()
specialinvoke v.<java.lang.Object: void <init>()>();
// derbyConnectorRule = new DerbyConnectorRule()
v = new org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule;
specialinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: void <init>()>();
v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule> = v;
// temporaryFolder = new TemporaryFolder()
v = new org.junit.rules.TemporaryFolder;
specialinvoke v.<org.junit.rules.TemporaryFolder: void <init>()>();
v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.junit.rules.TemporaryFolder temporaryFolder> = v;
// tableMock = Mockito.mock(Table.class)
v = staticinvoke <org.mockito.Mockito: java.lang.Object mock(java.lang.Class)>(class "Lorg/apache/hadoop/hive/metastore/api/Table;");
v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock> = v;
return;
}
// Convenience overload: builds a DataSegment at the given file path using a fixed
// UTC interval [100, 170), LinearShardSpec(partitionNum=0) and version "v1",
// delegating to the 4-arg createSegment overload below.
private org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String) throws java.io.IOException
{
org.joda.time.DateTimeZone v;
org.joda.time.Interval v;
org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v;
java.lang.String v;
java.lang.Integer v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// incoming parameter: segment file path
v := @parameter: java.lang.String;
// new Interval(100L, 170L, DateTimeZone.UTC)
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 170L, v);
// new LinearShardSpec(0)
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
// createSegment(path, interval, "v1", shardSpec) — arg identity obscured by the
// collapsed `v` names; presumably (path, interval, version, shardSpec). TODO confirm.
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
return v;
}
// Writes placeholder segment data ("dummySegmentData") to the given path, then
// builds a DataSegment for dataSource "default.testName" with the supplied
// version/interval/shardSpec, loadSpec {"path": <path>} and size 1000.
// Throws java.io.IOException from FileUtils.writeStringToFile.
private org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String, org.joda.time.Interval, java.lang.String, org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec) throws java.io.IOException
{
org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder v, v, v, v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
java.io.File v;
org.joda.time.Interval v;
java.lang.String v, v;
org.apache.hive.druid.com.google.common.collect.ImmutableMap v;
org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// parameters: path, interval, version, shardSpec (in declaration order)
v := @parameter: java.lang.String;
v := @parameter: org.joda.time.Interval;
v := @parameter: java.lang.String;
v := @parameter: org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec;
// FileUtils.writeStringToFile(new File(path), "dummySegmentData")
// NOTE(review): deprecated overload without an explicit charset — uses the platform default.
v = new java.io.File;
specialinvoke v.<java.io.File: void <init>(java.lang.String)>(v);
staticinvoke <org.apache.commons.io.FileUtils: void writeStringToFile(java.io.File,java.lang.String)>(v, "dummySegmentData");
// DataSegment.builder().dataSource("default.testName").version(v).interval(i)
//   .shardSpec(s).loadSpec(ImmutableMap.of("path", path)).size(1000).build()
v = staticinvoke <org.apache.hive.druid.org.apache.druid.timeline.DataSegment: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder builder()>();
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder dataSource(java.lang.String)>("default.testName");
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder version(java.lang.String)>(v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder interval(org.joda.time.Interval)>(v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder shardSpec(org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.ImmutableMap: org.apache.hive.druid.com.google.common.collect.ImmutableMap of(java.lang.Object,java.lang.Object)>("path", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder loadSpec(java.util.Map)>(v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder size(long)>(1000L);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment build()>();
return v;
}
// Test setup — presumably a JUnit @Before method (annotation not visible in this
// IR view). Resolves the working path and segments-table name, stubs tableMock,
// builds the Configuration, and constructs the DruidStorageHandler under test.
public void before() throws java.lang.Throwable
{
java.lang.Integer v, v, v, v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
java.util.Map v;
org.apache.hadoop.conf.Configuration v, v, v, v, v, v;
org.apache.hadoop.fs.Path v;
org.apache.hadoop.hive.metastore.api.StorageDescriptor v;
java.util.UUID v;
java.util.HashMap v;
java.util.function.Supplier v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v, v;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v, v, v;
int v, v;
java.lang.String v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v;
org.mockito.stubbing.OngoingStubbing v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
java.io.File v;
org.apache.hadoop.hive.metastore.api.Table v, v, v, v, v;
java.lang.Object v, v, v;
org.junit.rules.TemporaryFolder v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// tableWorkingPath = temporaryFolder.newFolder().getAbsolutePath()
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.junit.rules.TemporaryFolder temporaryFolder>;
v = virtualinvoke v.<org.junit.rules.TemporaryFolder: java.io.File newFolder()>();
v = virtualinvoke v.<java.io.File: java.lang.String getAbsolutePath()>();
v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath> = v;
// segmentsTable = derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable()
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig: java.lang.String getSegmentsTable()>();
v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String segmentsTable> = v;
// when(tableMock.getParameters()).thenReturn({"external.table.purge": "TRUE"})
v = new java.util.HashMap;
specialinvoke v.<java.util.HashMap: void <init>()>();
interfaceinvoke v.<java.util.Map: java.lang.Object put(java.lang.Object,java.lang.Object)>("external.table.purge", "TRUE");
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
v = virtualinvoke v.<org.apache.hadoop.hive.metastore.api.Table: java.util.Map getParameters()>();
v = staticinvoke <org.mockito.Mockito: org.mockito.stubbing.OngoingStubbing when(java.lang.Object)>(v);
interfaceinvoke v.<org.mockito.stubbing.OngoingStubbing: org.mockito.stubbing.OngoingStubbing thenReturn(java.lang.Object)>(v);
// when(tableMock.getPartitionKeysSize()).thenReturn(0) — unpartitioned table
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
v = virtualinvoke v.<org.apache.hadoop.hive.metastore.api.Table: int getPartitionKeysSize()>();
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
v = staticinvoke <org.mockito.Mockito: org.mockito.stubbing.OngoingStubbing when(java.lang.Object)>(v);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
interfaceinvoke v.<org.mockito.stubbing.OngoingStubbing: org.mockito.stubbing.OngoingStubbing thenReturn(java.lang.Object)>(v);
// StorageDescriptor mock with getBucketColsSize() == 0 — no bucketing
v = staticinvoke <org.mockito.Mockito: java.lang.Object mock(java.lang.Class)>(class "Lorg/apache/hadoop/hive/metastore/api/StorageDescriptor;");
v = virtualinvoke v.<org.apache.hadoop.hive.metastore.api.StorageDescriptor: int getBucketColsSize()>();
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
v = staticinvoke <org.mockito.Mockito: org.mockito.stubbing.OngoingStubbing when(java.lang.Object)>(v);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
interfaceinvoke v.<org.mockito.stubbing.OngoingStubbing: org.mockito.stubbing.OngoingStubbing thenReturn(java.lang.Object)>(v);
// when(tableMock.getSd()).thenReturn(sdMock)
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
v = virtualinvoke v.<org.apache.hadoop.hive.metastore.api.Table: org.apache.hadoop.hive.metastore.api.StorageDescriptor getSd()>();
v = staticinvoke <org.mockito.Mockito: org.mockito.stubbing.OngoingStubbing when(java.lang.Object)>(v);
interfaceinvoke v.<org.mockito.stubbing.OngoingStubbing: org.mockito.stubbing.OngoingStubbing thenReturn(java.lang.Object)>(v);
// when(tableMock.getDbName()).thenReturn("default")
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
v = virtualinvoke v.<org.apache.hadoop.hive.metastore.api.Table: java.lang.String getDbName()>();
v = staticinvoke <org.mockito.Mockito: org.mockito.stubbing.OngoingStubbing when(java.lang.Object)>(v);
interfaceinvoke v.<org.mockito.stubbing.OngoingStubbing: org.mockito.stubbing.OngoingStubbing thenReturn(java.lang.Object)>("default");
// when(tableMock.getTableName()).thenReturn("testName")
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
v = virtualinvoke v.<org.apache.hadoop.hive.metastore.api.Table: java.lang.String getTableName()>();
v = staticinvoke <org.mockito.Mockito: org.mockito.stubbing.OngoingStubbing when(java.lang.Object)>(v);
interfaceinvoke v.<org.mockito.stubbing.OngoingStubbing: org.mockito.stubbing.OngoingStubbing thenReturn(java.lang.Object)>("testName");
// config = new Configuration()
v = new org.apache.hadoop.conf.Configuration;
specialinvoke v.<org.apache.hadoop.conf.Configuration: void <init>()>();
v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config> = v;
// config.set(HIVEQUERYID, "hive-" + UUID.randomUUID()) — unique query id per run
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYID>;
v = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v);
v = staticinvoke <java.util.UUID: java.util.UUID randomUUID()>();
v = virtualinvoke v.<java.util.UUID: java.lang.String toString()>();
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("hive-\u0001");
virtualinvoke v.<org.apache.hadoop.conf.Configuration: void set(java.lang.String,java.lang.String)>(v, v);
// config.set(DRUID_WORKING_DIR, tableWorkingPath)
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_WORKING_DIR>;
v = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
virtualinvoke v.<org.apache.hadoop.conf.Configuration: void set(java.lang.String,java.lang.String)>(v, v);
// config.set(DRUID_SEGMENT_DIRECTORY, new Path(tableWorkingPath, "finalSegmentDir").toString())
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY>;
v = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v);
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, "finalSegmentDir");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
virtualinvoke v.<org.apache.hadoop.conf.Configuration: void set(java.lang.String,java.lang.String)>(v, v);
// config.set("hive.druid.maxTries", "0") — fail fast, no retries in tests
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
virtualinvoke v.<org.apache.hadoop.conf.Configuration: void set(java.lang.String,java.lang.String)>("hive.druid.maxTries", "0");
// druidStorageHandler = new DruidStorageHandler(connector, metadataTablesConfig)
v = new org.apache.hadoop.hive.druid.DruidStorageHandler;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
specialinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void <init>(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler> = v;
// druidStorageHandler.setConf(config)
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void setConf(org.apache.hadoop.conf.Configuration)>(v);
return;
}
// Test teardown — presumably a JUnit @After method (annotation not visible in
// this IR view). Deletes the per-test temporary folder.
public void tearDown()
{
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.junit.rules.TemporaryFolder v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.junit.rules.TemporaryFolder temporaryFolder>;
virtualinvoke v.<org.junit.rules.TemporaryFolder: void delete()>();
return;
}
// Verifies that preCreateTable() creates the Druid segments metadata table:
// asserts the table does not exist, calls preCreateTable(tableMock), asserts it
// does. The label/goto/@caughtexception trailer is the compiled form of a
// try/finally (likely try-with-resources) that closes the JDBI Handle on all paths.
public void testPreCreateTableWillCreateSegmentsTable() throws org.apache.hadoop.hive.metastore.api.MetaException
{
java.lang.Throwable v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v, v;
org.skife.jdbi.v.DBI v;
java.lang.String v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v;
boolean v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v, v, v;
org.apache.hadoop.hive.metastore.api.Table v;
org.skife.jdbi.v.Handle v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// handle = derbyConnectorRule.getConnector().getDBI().open()
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility: org.skife.jdbi.v.DBI getDBI()>();
v = virtualinvoke v.<org.skife.jdbi.v.DBI: org.skife.jdbi.v.Handle open()>();
// --- begin protected region (see `catch` directive at end of method) ---
label:
// assertFalse(connector.tableExists(handle, segmentsTable)) — precondition
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String segmentsTable>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility: boolean tableExists(org.skife.jdbi.v.Handle,java.lang.String)>(v, v);
staticinvoke <org.junit.Assert: void assertFalse(boolean)>(v);
// action under test: druidStorageHandler.preCreateTable(tableMock)
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// assertTrue(connector.tableExists(handle, segmentsTable)) — postcondition
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String segmentsTable>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility: boolean tableExists(org.skife.jdbi.v.Handle,java.lang.String)>(v, v);
staticinvoke <org.junit.Assert: void assertTrue(boolean)>(v);
// normal exit: close handle if non-null
label:
if v == null goto label;
interfaceinvoke v.<org.skife.jdbi.v.Handle: void close()>();
goto label;
// exceptional exit: rethrow after the finally path
label:
v := @caughtexception;
throw v;
label:
return;
catch java.lang.Throwable from label to label with label;
}
// Verifies preCreateTable() when the target data source already has published
// segments: creates the segment table, publishes one segment through
// SQLMetadataStorageUpdaterJobHandler, then calls preCreateTable(tableMock).
// No explicit assertion — the test presumably passes if no exception is thrown
// (or an @Test(expected=...) annotation not visible in this IR view applies).
public void testPreCreateTableWhenDataSourceExists() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
org.apache.hadoop.fs.Path v, v;
java.lang.String v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v;
org.apache.hive.druid.org.apache.druid.indexer.SQLMetadataStorageUpdaterJobHandler v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v, v;
org.apache.hadoop.hive.metastore.api.Table v;
java.util.List v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// derbyConnectorRule.getConnector().createSegmentTable()
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility: void createSegmentTable()>();
// new SQLMetadataStorageUpdaterJobHandler(connector)
v = new org.apache.hive.druid.org.apache.druid.indexer.SQLMetadataStorageUpdaterJobHandler;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
specialinvoke v.<org.apache.hive.druid.org.apache.druid.indexer.SQLMetadataStorageUpdaterJobHandler: void <init>(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector)>(v);
// segment = createSegment(new Path(new Path(tableWorkingPath, makeStagingName()), "intermediatePath").toString())
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "intermediatePath");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String)>(v);
// publishSegments(segmentsTable, singletonList(segment), JSON_MAPPER)
// NOTE(review): arg identity obscured by the collapsed `v` names — matches the
// statement order above; confirm against original source.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String segmentsTable>;
v = staticinvoke <java.util.Collections: java.util.List singletonList(java.lang.Object)>(v);
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.indexer.SQLMetadataStorageUpdaterJobHandler: void publishSegments(java.lang.String,java.util.List,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
// action under test: druidStorageHandler.preCreateTable(tableMock)
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
return;
}
// End-to-end metadata test: preCreateTable -> write a segment descriptor into
// the staging dir -> commitCreateTable (segment gets registered; data source
// list becomes ["default.testName"]) -> commitDropTable with deleteData=false
// (second arg 0) -> data source list becomes empty. Note: tableMock carries
// "external.table.purge"="TRUE" from before(), which presumably drives the
// drop-time cleanup despite deleteData=false — TODO confirm against handler code.
public void testCommitCreateTablePlusCommitDropTableWithoutPurge() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.conf.Configuration v;
org.apache.hadoop.fs.Path v, v, v, v;
java.util.Collection v, v;
java.lang.Object[] v, v, v, v;
java.lang.String[] v;
java.util.function.Supplier v, v;
java.util.ArrayList v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v, v, v;
java.lang.String v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v, v;
org.apache.hadoop.hive.metastore.api.Table v, v, v;
java.lang.Object v, v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// druidStorageHandler.preCreateTable(tableMock) — creates the segments table
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// localFs = FileSystem.getLocal(config)
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
// segment = createSegment(new Path(new Path(tableWorkingPath, makeStagingName()), "index.zip").toString())
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String)>(v);
// write the segment descriptor under <staging>/segmentsDescriptorDir so
// commitCreateTable can discover and publish it
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
// action: druidStorageHandler.commitCreateTable(tableMock)
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// assertArrayEquals(["default.testName"], getAllDataSourceNames(connector, tablesConfig))
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
// action: commitDropTable(tableMock, false) — 0 is the boolean deleteData flag
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitDropTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
// assertArrayEquals([], getAllDataSourceNames(connector, tablesConfig))
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList()>();
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
return;
}
public void testCommitCreateTablePlusCommitDropTableWithPurge() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.conf.Configuration v;
org.apache.hadoop.fs.Path v, v, v, v;
java.util.Collection v, v;
java.lang.Object[] v, v, v, v;
java.lang.String[] v;
java.util.function.Supplier v, v;
java.util.ArrayList v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v, v, v;
java.lang.String v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v, v;
org.apache.hadoop.hive.metastore.api.Table v, v, v;
java.lang.Object v, v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String)>(v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitDropTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 1);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList()>();
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
return;
}
// Test: create an empty Druid table, verify no datasource is registered in the
// metadata store, then drop it WITHOUT purge (deleteData flag == 0/false) and
// verify the store is still empty.
// NOTE(review): every local is printed as `v` — a decompiler artifact where
// distinct locals were merged by the Jimple printer; data flow below is
// inferred from the invoke signatures, not from variable names.
public void testCommitCreateEmptyTablePlusCommitDropTableWithoutPurge() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
java.lang.Object[] v, v, v, v;
java.util.function.Supplier v, v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
java.util.ArrayList v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
java.util.Collection v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v, v;
org.apache.hadoop.hive.metastore.api.Table v, v, v;
java.lang.Object v, v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// druidStorageHandler.preCreateTable(tableMock) — pre-create hook.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// druidStorageHandler.commitCreateTable(tableMock) — commit creation of the
// (empty) table; no segments have been written, so no datasource is expected.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Expected value: empty array (Lists.newArrayList().toArray()).
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList()>();
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
// Actual value: all datasource names read from the Derby metadata store via
// DruidStorageHandlerUtils.getAllDataSourceNames(connector, tablesConfig).
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
// Assert: no datasources registered after creating an empty table.
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
// druidStorageHandler.commitDropTable(tableMock, false) — the literal 0 is the
// boolean deleteData/purge flag, i.e. drop WITHOUT purging segment data.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitDropTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
// Re-read the datasource list and assert it is still empty after the drop.
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList()>();
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
return;
}
// Test: identical flow to the WithoutPurge variant above, except the drop is
// performed WITH purge (deleteData flag == 1/true). Creating an empty table
// registers no datasource, and a purging drop leaves the store empty as well.
// NOTE(review): all locals print as `v` — Jimple printer artifact; flow is
// inferred from invoke signatures.
public void testCommitCreateEmptyTablePlusCommitDropTableWithPurge() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
java.lang.Object[] v, v, v, v;
java.util.function.Supplier v, v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
java.util.ArrayList v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
java.util.Collection v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v, v;
org.apache.hadoop.hive.metastore.api.Table v, v, v;
java.lang.Object v, v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// druidStorageHandler.preCreateTable(tableMock).
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// druidStorageHandler.commitCreateTable(tableMock) — commit the empty table.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Expected: empty array — no segments were written.
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList()>();
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
// Actual: datasource names read from the Derby metadata store.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
// Assert: no datasources registered after creating an empty table.
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
// druidStorageHandler.commitDropTable(tableMock, true) — literal 1 is the
// boolean purge flag: drop WITH data purge.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitDropTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 1);
// Re-read and assert the datasource list is still empty after the purge-drop.
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList()>();
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
return;
}
// Test: write a segment descriptor into the staging directory, commit the
// table creation, and verify the datasource "default.testName" appears in the
// Derby metadata store.
// NOTE(review): all locals print as `v` — Jimple printer artifact; flow is
// inferred from invoke signatures.
public void testCommitInsertTable() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
java.lang.Object[] v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v;
java.lang.String[] v;
java.util.function.Supplier v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
java.util.ArrayList v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
org.apache.hadoop.conf.Configuration v;
org.apache.hadoop.fs.Path v, v, v, v;
java.lang.String v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
java.util.Collection v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
org.apache.hadoop.hive.metastore.api.Table v, v;
java.lang.Object v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// druidStorageHandler.preCreateTable(tableMock).
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Local filesystem from the test Configuration.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
// Staging path = new Path(tableWorkingPath, druidStorageHandler.makeStagingName()).
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
// Build a DataSegment whose index location is <staging>/index.zip
// (createSegment is the private helper declared above in this class).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String)>(v);
// Write the segment descriptor under <staging>/segmentsDescriptorDir using
// DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath + writeSegmentDescriptor.
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
// Commit — this should pick up the descriptor written above and register the
// datasource in the metadata store.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Expected: exactly one datasource named "default.testName".
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
// Actual: datasource names read back from the Derby metadata store.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
return;
}
// Test: committing a table with no segment descriptors written registers no
// datasource — the metadata store's datasource list stays empty.
// NOTE(review): all locals print as `v` — Jimple printer artifact; flow is
// inferred from invoke signatures.
public void testCommitEmptyInsertTable() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
java.lang.Object[] v, v;
java.util.Collection v;
java.util.function.Supplier v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.hive.metastore.api.Table v, v;
java.util.ArrayList v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
java.lang.Object v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// druidStorageHandler.preCreateTable(tableMock).
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// druidStorageHandler.commitCreateTable(tableMock) — nothing was staged.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Expected: empty array.
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList()>();
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
// Actual: datasource names read from the Derby metadata store.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
// Assert: empty commit registered no datasource.
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
return;
}
// Test: physically push a (fake) segment index file to local storage via the
// HDFS pusher layout, then call DruidStorageHandler.deleteSegment and assert
// the index file and its ancestor directories (partitionNum / version /
// interval / datasource levels, walked via Path.getParent()) are all removed.
// NOTE(review): all locals print as `v` — Jimple printer artifact. Where two
// Path values are live at once (the pusher-computed index path vs. the
// staging-derived path), the exact operand of each `exists` call cannot be
// recovered from this printout; comments below describe intent, not a
// verified operand mapping.
public void testDeleteSegment() throws java.io.IOException, org.apache.hive.druid.org.apache.druid.segment.loading.SegmentLoadingException
{
org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v, v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.conf.Configuration v, v;
org.apache.hadoop.fs.Path v, v, v, v, v, v, v, v, v, v, v;
org.apache.hive.druid.com.google.common.collect.ImmutableMap v;
boolean v, v, v, v, v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig v;
org.apache.hadoop.fs.FSDataOutputStream v;
java.lang.String v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher v;
java.io.File v;
org.apache.hadoop.fs.LocalFileSystem v;
org.junit.rules.TemporaryFolder v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// Fresh temp folder — its absolute path becomes the segment storage root.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.junit.rules.TemporaryFolder temporaryFolder>;
v = virtualinvoke v.<org.junit.rules.TemporaryFolder: java.io.File newFolder()>();
v = virtualinvoke v.<java.io.File: java.lang.String getAbsolutePath()>();
// Local filesystem from the test Configuration.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
// Staging path = new Path(tableWorkingPath, makeStagingName()).
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
// Build a DataSegment pointing at <staging>/index.zip via the createSegment helper.
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String)>(v);
// HdfsDataSegmentPusherConfig with the temp folder as storage directory.
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void <init>()>();
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void setStorageDirectory(java.lang.String)>(v);
// HdfsDataSegmentPusher(config, hadoopConf, DruidStorageHandlerUtils.JSON_MAPPER).
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher: void <init>(org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
// Compute the pusher-style on-disk path for "index.zip" under the storage
// root via JobHelper.makeFileNamePath(base, fs, segment, name, pusher).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String)>(v);
v = staticinvoke <org.apache.hive.druid.org.apache.druid.indexer.JobHelper: org.apache.hadoop.fs.Path makeFileNamePath(org.apache.hadoop.fs.Path,org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,java.lang.String,org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher)>(v, v, v, "index.zip", v);
// A sibling "index.zip" path relative to the computed location.
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
// Rebuild the segment with loadSpec {"path": <index path>} so deleteSegment
// can locate the physical file.
v = staticinvoke <org.apache.hive.druid.org.apache.druid.timeline.DataSegment: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder builder(org.apache.hive.druid.org.apache.druid.timeline.DataSegment)>(v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.ImmutableMap: org.apache.hive.druid.com.google.common.collect.ImmutableMap of(java.lang.Object,java.lang.Object)>("path", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder loadSpec(java.util.Map)>(v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment$Builder: org.apache.hive.druid.org.apache.druid.timeline.DataSegment build()>();
// Create the index file on disk (overwrite == 1/true), then close the stream.
v = virtualinvoke v.<org.apache.hadoop.fs.LocalFileSystem: org.apache.hadoop.fs.FSDataOutputStream create(org.apache.hadoop.fs.Path,boolean)>(v, 1);
virtualinvoke v.<java.io.OutputStream: void close()>();
// Sanity checks: both paths exist before deletion.
v = virtualinvoke v.<org.apache.hadoop.fs.LocalFileSystem: boolean exists(org.apache.hadoop.fs.Path)>(v);
staticinvoke <org.junit.Assert: void assertTrue(java.lang.String,boolean)>("index file is not created ??", v);
v = virtualinvoke v.<org.apache.hadoop.fs.LocalFileSystem: boolean exists(org.apache.hadoop.fs.Path)>(v);
staticinvoke <org.junit.Assert: void assertTrue(boolean)>(v);
// Exercise the code under test: delete the segment's physical storage.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void deleteSegment(org.apache.hive.druid.org.apache.druid.timeline.DataSegment)>(v);
// The index file itself must be gone.
v = virtualinvoke v.<org.apache.hadoop.fs.LocalFileSystem: boolean exists(org.apache.hadoop.fs.Path)>(v);
staticinvoke <org.junit.Assert: void assertFalse(java.lang.String,boolean)>("Index file still there ??", v);
// And so must each ancestor level, walked upward with getParent():
// partitionNum directory ...
v = virtualinvoke v.<org.apache.hadoop.fs.LocalFileSystem: boolean exists(org.apache.hadoop.fs.Path)>(v);
staticinvoke <org.junit.Assert: void assertFalse(java.lang.String,boolean)>("PartitionNum directory still there ??", v);
// ... version directory (one getParent up) ...
v = virtualinvoke v.<org.apache.hadoop.fs.Path: org.apache.hadoop.fs.Path getParent()>();
v = virtualinvoke v.<org.apache.hadoop.fs.LocalFileSystem: boolean exists(org.apache.hadoop.fs.Path)>(v);
staticinvoke <org.junit.Assert: void assertFalse(java.lang.String,boolean)>("Version directory still there ??", v);
// ... interval directory (two getParent up) ...
v = virtualinvoke v.<org.apache.hadoop.fs.Path: org.apache.hadoop.fs.Path getParent()>();
v = virtualinvoke v.<org.apache.hadoop.fs.Path: org.apache.hadoop.fs.Path getParent()>();
v = virtualinvoke v.<org.apache.hadoop.fs.LocalFileSystem: boolean exists(org.apache.hadoop.fs.Path)>(v);
staticinvoke <org.junit.Assert: void assertFalse(java.lang.String,boolean)>("Interval directory still there ??", v);
// ... datasource directory (three getParent up).
v = virtualinvoke v.<org.apache.hadoop.fs.Path: org.apache.hadoop.fs.Path getParent()>();
v = virtualinvoke v.<org.apache.hadoop.fs.Path: org.apache.hadoop.fs.Path getParent()>();
v = virtualinvoke v.<org.apache.hadoop.fs.Path: org.apache.hadoop.fs.Path getParent()>();
v = virtualinvoke v.<org.apache.hadoop.fs.LocalFileSystem: boolean exists(org.apache.hadoop.fs.Path)>(v);
staticinvoke <org.junit.Assert: void assertFalse(java.lang.String,boolean)>("Data source directory still there ??", v);
return;
}
public void testCommitInsertOverwriteTable() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.joda.time.DateTimeZone v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v, v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
java.lang.Integer v, v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
java.util.Map v;
org.apache.hadoop.conf.Configuration v, v, v, v;
org.apache.hadoop.fs.Path v, v, v, v, v, v;
java.net.URI v;
org.apache.hive.druid.com.google.common.collect.ImmutableMap v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig v;
java.util.Collection v;
java.util.List v, v;
java.lang.Object[] v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec v, v;
java.lang.String[] v;
java.util.function.Supplier v;
java.util.ArrayList v, v;
org.joda.time.Interval v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v;
int v;
java.lang.String v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
java.io.File v;
org.apache.hadoop.hive.metastore.api.Table v, v;
java.lang.Object v, v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void <init>()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY>;
v = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v);
v = virtualinvoke v.<org.apache.hadoop.conf.Configuration: java.lang.String get(java.lang.String)>(v);
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void setStorageDirectory(java.lang.String)>(v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher: void <init>(org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v = staticinvoke <java.util.Collections: java.util.List singletonList(java.lang.Object)>(v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.List publishSegmentsAndCommit(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig,java.lang.String,java.util.List,boolean,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher)>(v, v, "default.testName", v, 1, v, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(180L, 250L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 1);
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(1L, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Iterables: java.lang.Object getOnlyElement(java.lang.Iterable)>(v);
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>(v, v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.lang.String getVersion()>();
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.lang.String getVersion()>();
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>(v, v);
v = new org.apache.hadoop.fs.Path;
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String getPathForHadoop()>();
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String makeIndexPathName(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,java.lang.String)>(v, "index.zip");
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.ImmutableMap: org.apache.hive.druid.com.google.common.collect.ImmutableMap of(java.lang.Object,java.lang.Object,java.lang.Object,java.lang.Object)>("type", "hdfs", "path", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.util.Map getLoadSpec()>();
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>(v, v);
v = new java.io.File;
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.net.URI toUri()>();
specialinvoke v.<java.io.File: void <init>(java.net.URI)>(v);
v = staticinvoke <org.apache.commons.io.FileUtils: java.lang.String readFileToString(java.io.File)>(v);
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>("dummySegmentData", v);
return;
}
public void testCommitMultiInsertOverwriteTable() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.joda.time.DateTimeZone v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v, v, v, v;
java.lang.Integer v, v, v, v;
org.apache.hadoop.conf.Configuration v, v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig v;
java.lang.Object[] v, v, v, v, v, v, v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec v, v, v, v;
java.util.function.Supplier v;
java.util.ArrayList v, v, v, v, v, v, v, v, v, v;
org.joda.time.Interval v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
java.lang.String v, v, v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v, v, v, v, v, v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
org.apache.hadoop.hive.metastore.api.Table v, v, v, v, v, v, v, v, v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.fs.Path v, v, v, v, v, v, v, v, v, v, v;
java.util.Collection v, v, v, v, v;
java.util.List v, v, v, v, v, v, v, v, v;
java.lang.String[] v, v, v, v, v;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v;
int v, v, v, v, v, v, v, v;
java.lang.Object v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void <init>()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY>;
v = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v);
v = virtualinvoke v.<org.apache.hadoop.conf.Configuration: java.lang.String get(java.lang.String)>(v);
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void setStorageDirectory(java.lang.String)>(v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher: void <init>(org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v = staticinvoke <java.util.Collections: java.util.List singletonList(java.lang.Object)>(v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.List publishSegmentsAndCommit(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig,java.lang.String,java.util.List,boolean,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher)>(v, v, "default.testName", v, 1, v, v);
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(1L, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 1);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(0L, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 1);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(0L, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 1);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(0L, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(180L, 250L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(1L, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(200L, 250L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 1);
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(1L, v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 200L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(2L, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 1);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(0L, v);
return;
}
// Helper: returns the list of "used" segments recorded in the Derby metadata
// store for the given tables config, by running a query callback inside a
// managed JDBI handle.
// NOTE(review): decompiled Jimple — every local is named "v"; each assignment
// below targets the local whose declared type matches its right-hand side.
private java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility, org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)
{
org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig v;
org.skife.jdbi.v.tweak.HandleCallback v;
org.skife.jdbi.v.DBI v;
java.lang.Object v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
v := @parameter: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility;
v := @parameter: org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig;
// Obtain the JDBI entry point from the test Derby connector.
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility: org.skife.jdbi.v.DBI getDBI()>();
// Capture the tables config into a compiler-generated lambda implementing
// HandleCallback (the actual SQL lives in the lambda class, not visible here).
v = staticinvoke <org.apache.hadoop.hive.druid.TestDruidStorageHandler$lambda_getUsedSegmentsList_1__5: org.skife.jdbi.v.tweak.HandleCallback bootstrap$(org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v);
// Execute the callback with a handle whose lifecycle JDBI manages; the
// Object result is returned as the (erased) java.util.List.
v = virtualinvoke v.<org.skife.jdbi.v.DBI: java.lang.Object withHandle(org.skife.jdbi.v.tweak.HandleCallback)>(v);
return v;
}
// Test: INSERT INTO flow of DruidStorageHandler. It pre-creates the datasource,
// publishes one segment (version "v0", LinearShardSpec(1)) directly to the
// metadata store, stages a descriptor for a second segment (version "v1",
// LinearShardSpec(0)) under the handler's staging dir, then calls
// commitInsertTable with boolean arg 0 (false — presumably "overwrite";
// TODO confirm against DruidStorageHandler). Finally it asserts: the datasource
// list equals ["default.testName"], two segments are "used", the segment at
// index 1 has version "v0" and LinearShardSpec partition number 2, its load
// spec is {type=hdfs, path=<pusher index path>}, and the pushed file content
// is "dummySegmentData".
// NOTE(review): decompiled Jimple — every local is named "v"; each assignment
// targets the local whose declared type matches its right-hand side.
public void testCommitInsertIntoTable() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.joda.time.DateTimeZone v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v, v;
java.lang.Integer v, v;
java.util.Map v;
org.apache.hadoop.conf.Configuration v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig v;
java.lang.Object[] v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec v, v;
java.util.function.Supplier v;
java.util.ArrayList v, v;
org.joda.time.Interval v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
java.lang.String v, v, v, v, v, v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
java.io.File v;
org.apache.hadoop.hive.metastore.api.Table v, v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.fs.Path v, v, v, v, v, v;
java.net.URI v;
org.apache.hive.druid.com.google.common.collect.ImmutableMap v;
boolean v;
java.util.Collection v;
java.util.List v, v;
java.lang.String[] v;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v;
int v, v;
java.lang.Object v, v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// Resolve the Derby connector and the metadata tables config from the rule.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
// Pre-create the datasource for the mocked metastore table.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Local FS and the handler's staging directory under tableWorkingPath.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
// First segment: path <staging>/index.zip, interval [100,150) UTC,
// version "v0", LinearShardSpec(1).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v = staticinvoke <java.util.Collections: java.util.List singletonList(java.lang.Object)>(v);
// HDFS pusher rooted at the DRUID_SEGMENT_DIRECTORY configured in config.
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void <init>()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY>;
v = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v);
v = virtualinvoke v.<org.apache.hadoop.conf.Configuration: java.lang.String get(java.lang.String)>(v);
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void setStorageDirectory(java.lang.String)>(v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher: void <init>(org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
// Publish the first segment directly to the metadata store (boolean arg 1 =
// true; presumably "overwrite" — TODO confirm against DruidStorageHandlerUtils).
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.List publishSegmentsAndCommit(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig,java.lang.String,java.util.List,boolean,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher)>(v, v, "default.testName", v, 1, v, v);
// Second segment: same interval, version "v1", LinearShardSpec(0). Its
// descriptor is written into <staging>/segmentsDescriptorDir for the handler.
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
// Commit as INSERT INTO (boolean arg 0 = false).
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
// Assert the datasource list is exactly ["default.testName"].
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
// Both segments are now in the "used" set.
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(2L, v);
// The segment at index 1 carries version "v0" and a LinearShardSpec whose
// partition number was bumped to 2 (appended after the published partition 1).
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(1);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.lang.String getVersion()>();
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>("v0", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec getShardSpec()>();
v = v instanceof org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
staticinvoke <org.junit.Assert: void assertTrue(boolean)>(v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec getShardSpec()>();
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec: int getPartitionNum()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(2L, v);
// Its load spec must be {type=hdfs, path=<pusher's hadoop index path>}.
v = new org.apache.hadoop.fs.Path;
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String getPathForHadoop()>();
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String makeIndexPathName(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,java.lang.String)>(v, "index.zip");
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.ImmutableMap: org.apache.hive.druid.com.google.common.collect.ImmutableMap of(java.lang.Object,java.lang.Object,java.lang.Object,java.lang.Object)>("type", "hdfs", "path", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.util.Map getLoadSpec()>();
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>(v, v);
// Finally, the file at that path contains the dummy segment payload.
v = new java.io.File;
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.net.URI toUri()>();
specialinvoke v.<java.io.File: void <init>(java.net.URI)>(v);
v = staticinvoke <org.apache.commons.io.FileUtils: java.lang.String readFileToString(java.io.File)>(v);
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>("dummySegmentData", v);
return;
}
// Test: INSERT INTO that appends one more partition to an existing interval.
// Publishes one segment (version "v0", LinearShardSpec(0)), stages a descriptor
// for a second segment with the SAME interval, version and partition number
// (LinearShardSpec(0)), then calls commitInsertTable with boolean arg 0 (false —
// presumably "overwrite"; TODO confirm against DruidStorageHandler). Asserts:
// datasource list equals ["default.testName"], two segments are "used", the
// segment at index 1 keeps version "v0" but is assigned partition number 1
// (appended after partition 0), its load spec is {type=hdfs, path=<pusher index
// path>}, and the pushed file content is "dummySegmentData".
// NOTE(review): decompiled Jimple — every local is named "v"; each assignment
// targets the local whose declared type matches its right-hand side.
public void testInsertIntoAppendOneMorePartition() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.joda.time.DateTimeZone v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v, v;
java.lang.Integer v, v;
java.util.Map v;
org.apache.hadoop.conf.Configuration v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig v;
java.lang.Object[] v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec v, v;
java.util.function.Supplier v;
java.util.ArrayList v, v;
org.joda.time.Interval v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
java.lang.String v, v, v, v, v, v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
java.io.File v;
org.apache.hadoop.hive.metastore.api.Table v, v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.fs.Path v, v, v, v, v, v;
java.net.URI v;
org.apache.hive.druid.com.google.common.collect.ImmutableMap v;
boolean v;
java.util.Collection v;
java.util.List v, v;
java.lang.String[] v;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v;
int v, v;
java.lang.Object v, v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// Resolve the Derby connector and the metadata tables config from the rule.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
// Pre-create the datasource for the mocked metastore table.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Local FS and the handler's staging directory under tableWorkingPath.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
// HDFS pusher rooted at the DRUID_SEGMENT_DIRECTORY configured in config.
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void <init>()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY>;
v = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v);
v = virtualinvoke v.<org.apache.hadoop.conf.Configuration: java.lang.String get(java.lang.String)>(v);
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void setStorageDirectory(java.lang.String)>(v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher: void <init>(org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
// First segment: path <staging>/index.zip, interval [100,150) UTC,
// version "v0", LinearShardSpec(0); publish it directly (boolean arg 1 = true;
// presumably "overwrite" — TODO confirm against DruidStorageHandlerUtils).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v = staticinvoke <java.util.Collections: java.util.List singletonList(java.lang.Object)>(v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.List publishSegmentsAndCommit(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig,java.lang.String,java.util.List,boolean,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher)>(v, v, "default.testName", v, 1, v, v);
// Second segment: identical interval, version "v0" and LinearShardSpec(0);
// write its descriptor into <staging>/segmentsDescriptorDir for the handler.
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
// Commit as INSERT INTO (boolean arg 0 = false).
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
// Assert the datasource list is exactly ["default.testName"].
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
// Both segments are now in the "used" set.
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(2L, v);
// The segment at index 1 keeps version "v0" and its LinearShardSpec partition
// number was bumped to 1 (appended after the published partition 0).
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(1);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.lang.String getVersion()>();
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>("v0", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec getShardSpec()>();
v = v instanceof org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
staticinvoke <org.junit.Assert: void assertTrue(boolean)>(v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec getShardSpec()>();
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec: int getPartitionNum()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(1L, v);
// Its load spec must be {type=hdfs, path=<pusher's hadoop index path>}.
v = new org.apache.hadoop.fs.Path;
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String getPathForHadoop()>();
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String makeIndexPathName(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,java.lang.String)>(v, "index.zip");
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.ImmutableMap: org.apache.hive.druid.com.google.common.collect.ImmutableMap of(java.lang.Object,java.lang.Object,java.lang.Object,java.lang.Object)>("type", "hdfs", "path", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.util.Map getLoadSpec()>();
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>(v, v);
// Finally, the file at that path contains the dummy segment payload.
v = new java.io.File;
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.net.URI toUri()>();
specialinvoke v.<java.io.File: void <init>(java.net.URI)>(v);
v = staticinvoke <org.apache.commons.io.FileUtils: java.lang.String readFileToString(java.io.File)>(v);
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>("dummySegmentData", v);
return;
}
// Test (decompiled to Jimple): commit of an INSERT INTO when a file already exists
// at the destination deep-storage path. Publishes an existing segment to the Derby
// metadata store, stages a new segment descriptor, pre-creates a conflicting
// "index.zip" at the pusher's target path, runs commitInsertTable, then verifies
// the resulting segment metadata and that the pushed file replaced the stale one.
// NOTE(decompiled): the decompiler collapsed every local to the name "v"; data flow
// follows statement order only, so the code below is kept byte-identical.
public void testCommitInsertIntoWhenDestinationSegmentFileExist() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.joda.time.DateTimeZone v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v, v, v;
java.lang.Integer v, v, v;
java.util.Map v;
org.apache.hadoop.conf.Configuration v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig v;
java.lang.Object[] v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec v, v, v;
java.util.function.Supplier v;
java.util.ArrayList v, v;
org.joda.time.Interval v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
java.lang.String v, v, v, v, v, v, v, v, v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
java.io.File v, v;
org.apache.hadoop.hive.metastore.api.Table v, v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.fs.Path v, v, v, v, v, v, v, v;
java.net.URI v, v;
org.apache.hive.druid.com.google.common.collect.ImmutableMap v;
boolean v;
java.util.Collection v;
java.util.List v, v;
java.lang.String[] v;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v;
int v, v;
java.lang.Object v, v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// Setup: obtain the Derby metadata connector and the MetadataStorageTablesConfig
// from the JUnit rule's supplier.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
// Register the datasource for the mocked Hive table.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Local filesystem backed by the test Configuration.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
// Staging path = <tableWorkingPath>/<makeStagingName()>.
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
// Pre-existing ("old") segment: <staging>/index_old.zip, interval [100,150) UTC,
// version "v0", LinearShardSpec(partition 1).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index_old.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v = staticinvoke <java.util.Collections: java.util.List singletonList(java.lang.Object)>(v);
// HDFS pusher whose storage directory comes from DRUID_SEGMENT_DIRECTORY in the Hive conf.
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void <init>()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY>;
v = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v);
v = virtualinvoke v.<org.apache.hadoop.conf.Configuration: java.lang.String get(java.lang.String)>(v);
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void setStorageDirectory(java.lang.String)>(v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher: void <init>(org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
// Publish the old segment to the metadata store; boolean arg is 1 (true) in bytecode.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.List publishSegmentsAndCommit(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig,java.lang.String,java.util.List,boolean,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher)>(v, v, "default.testName", v, 1, v, v);
// New segment to be committed: <staging>/index.zip, interval [100,150) UTC,
// version "v1", LinearShardSpec(partition 0).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
// Write the new segment's descriptor under <staging>/segmentsDescriptorDir so the
// commit can discover it.
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
// Build a segment identity that collides at the destination (index_conflict.zip,
// interval [100,150), version "v1", partition 1) and pre-create a stale "dummy"
// file at the pusher's final hadoop path for it — the condition under test.
// NOTE(review): which DataSegment feeds makeIndexPathName below is obscured by the
// collapsed locals; presumably the conflicting one just created — confirm in source.
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index_conflict.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
v = new org.apache.hadoop.fs.Path;
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String getPathForHadoop()>();
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String makeIndexPathName(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,java.lang.String)>(v, "index.zip");
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = new java.io.File;
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.net.URI toUri()>();
specialinvoke v.<java.io.File: void <init>(java.net.URI)>(v);
staticinvoke <org.apache.commons.io.FileUtils: void writeStringToFile(java.io.File,java.lang.String)>(v, "dummy");
// Action under test: commitInsertTable with overwrite = 0 (false), i.e. INSERT INTO.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
// Assert: the metadata store lists exactly the "default.testName" datasource.
v = newarray (java.lang.String)[1];
v[0] = "default.testName";
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Object[])>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.Collection getAllDataSourceNames(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.Lists: java.util.ArrayList newArrayList(java.lang.Iterable)>(v);
v = virtualinvoke v.<java.util.ArrayList: java.lang.Object[] toArray()>();
staticinvoke <org.junit.Assert: void assertArrayEquals(java.lang.Object[],java.lang.Object[])>(v, v);
// Assert: two used segments exist after the commit (old + newly committed).
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.util.List getUsedSegmentsList(org.apache.hadoop.hive.druid.DerbyConnectorTestUtility,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig)>(v, v);
v = interfaceinvoke v.<java.util.List: int size()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(2L, v);
// Assert: the committed segment (list index 1) carries version "v0" (aligned with
// the existing interval's version) and a LinearShardSpec renumbered to partition 2.
v = interfaceinvoke v.<java.util.List: java.lang.Object get(int)>(1);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.lang.String getVersion()>();
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>("v0", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec getShardSpec()>();
v = v instanceof org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
staticinvoke <org.junit.Assert: void assertTrue(boolean)>(v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec getShardSpec()>();
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec: int getPartitionNum()>();
staticinvoke <org.junit.Assert: void assertEquals(long,long)>(2L, v);
// Assert: the stored load spec is {"type":"hdfs","path":<final index.zip path>}.
v = new org.apache.hadoop.fs.Path;
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String getPathForHadoop()>();
v = interfaceinvoke v.<org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher: java.lang.String makeIndexPathName(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,java.lang.String)>(v, "index.zip");
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = staticinvoke <org.apache.hive.druid.com.google.common.collect.ImmutableMap: org.apache.hive.druid.com.google.common.collect.ImmutableMap of(java.lang.Object,java.lang.Object,java.lang.Object,java.lang.Object)>("type", "hdfs", "path", v);
v = virtualinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.DataSegment: java.util.Map getLoadSpec()>();
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>(v, v);
// Assert: the push overwrote the stale "dummy" file with the real segment data.
v = new java.io.File;
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.net.URI toUri()>();
specialinvoke v.<java.io.File: void <init>(java.net.URI)>(v);
v = staticinvoke <org.apache.commons.io.FileUtils: java.lang.String readFileToString(java.io.File)>(v);
staticinvoke <org.junit.Assert: void assertEquals(java.lang.Object,java.lang.Object)>("dummySegmentData", v);
return;
}
// Test (decompiled to Jimple): INSERT INTO commit where the new segment's interval
// [100,300) overlaps — without exactly matching — three published segments covering
// [100,150), [150,200) and [200,300). The method ends right after commitInsertTable
// with no assertions; the original source presumably declares an expected exception
// via the @Test annotation (annotations are not visible in this decompilation) —
// TODO confirm against the Java source.
// NOTE(decompiled): all locals were collapsed to "v"; code kept byte-identical.
public void testCommitInsertIntoWithConflictingIntervalSegment() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.joda.time.DateTimeZone v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v, v, v, v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
java.lang.Integer v, v, v, v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.conf.Configuration v, v, v;
org.apache.hadoop.fs.Path v, v, v, v, v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig v;
java.util.List v;
org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec v, v, v, v;
java.util.function.Supplier v;
org.joda.time.Interval v, v, v, v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
java.lang.String v, v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
org.apache.hadoop.hive.metastore.api.Table v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment[] v;
java.lang.Object v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// Setup: Derby metadata connector + tables config from the test rule.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
// Register the datasource for the mocked Hive table.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Local filesystem and staging path <tableWorkingPath>/<makeStagingName()>.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
// Three pre-existing segments, all version "v0", LinearShardSpec(0):
//   [0] index_old_1.zip, interval [100,150) UTC
//   [1] index_old_2.zip, interval [150,200) UTC
//   [2] index_old_3.zip, interval [200,300) UTC
v = newarray (org.apache.hive.druid.org.apache.druid.timeline.DataSegment)[3];
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index_old_1.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v[0] = v;
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index_old_2.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(150L, 200L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v[1] = v;
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index_old_3.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(200L, 300L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v[2] = v;
v = staticinvoke <java.util.Arrays: java.util.List asList(java.lang.Object[])>(v);
// HDFS pusher; here the storage directory is a Path's string form (unlike the
// sibling test that reads DRUID_SEGMENT_DIRECTORY from the conf).
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void <init>()>();
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void setStorageDirectory(java.lang.String)>(v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher: void <init>(org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
// Publish the three existing segments; boolean arg is 1 (true) in bytecode.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.List publishSegmentsAndCommit(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig,java.lang.String,java.util.List,boolean,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher)>(v, v, "default.testName", v, 1, v, v);
// New segment spanning [100,300) UTC, version "v1", LinearShardSpec(0) — its
// interval overlaps all three published segments without matching any of them.
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 300L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
// Stage its descriptor, then attempt the INSERT INTO commit (overwrite = 0/false).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
return;
}
// Test (decompiled to Jimple): INSERT INTO commit into an interval whose published
// segment uses NoneShardSpec — a shard spec that cannot accept additional
// partitions — alongside two linearly-sharded segments on other intervals. The
// method ends right after commitInsertTable with no assertions; the original
// source presumably declares an expected exception via the @Test annotation
// (annotations are not visible in this decompilation) — TODO confirm against source.
// NOTE(decompiled): all locals were collapsed to "v"; code kept byte-identical.
public void testCommitInsertIntoWithNonExtendableSegment() throws org.apache.hadoop.hive.metastore.api.MetaException, java.io.IOException
{
org.joda.time.DateTimeZone v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment v, v, v, v;
org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper v;
java.lang.Integer v, v, v;
org.apache.hadoop.hive.druid.TestDruidStorageHandler v;
org.apache.hadoop.conf.Configuration v, v, v;
org.apache.hadoop.fs.Path v, v, v, v, v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig v;
java.util.List v;
org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec v, v, v;
java.util.function.Supplier v;
org.joda.time.Interval v, v, v, v;
org.apache.hive.druid.org.apache.druid.timeline.partition.NoneShardSpec v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule v, v;
java.lang.String v, v, v, v, v, v, v;
org.apache.hadoop.hive.druid.DruidStorageHandler v, v, v;
org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher v;
org.apache.hadoop.hive.druid.DerbyConnectorTestUtility v;
org.apache.hadoop.hive.metastore.api.Table v, v;
org.apache.hive.druid.org.apache.druid.timeline.DataSegment[] v;
java.lang.Object v;
org.apache.hadoop.fs.LocalFileSystem v;
v := @this: org.apache.hadoop.hive.druid.TestDruidStorageHandler;
// Setup: Derby metadata connector + tables config from the test rule.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility getConnector()>();
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule derbyConnectorRule>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DerbyConnectorTestUtility$DerbyConnectorRule: java.util.function.Supplier metadataTablesConfigSupplier()>();
v = interfaceinvoke v.<java.util.function.Supplier: java.lang.Object get()>();
// Register the datasource for the mocked Hive table.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void preCreateTable(org.apache.hadoop.hive.metastore.api.Table)>(v);
// Local filesystem and staging path <tableWorkingPath>/<makeStagingName()>.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = staticinvoke <org.apache.hadoop.fs.FileSystem: org.apache.hadoop.fs.LocalFileSystem getLocal(org.apache.hadoop.conf.Configuration)>(v);
v = new org.apache.hadoop.fs.Path;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: java.lang.String tableWorkingPath>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: java.lang.String makeStagingName()>();
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(java.lang.String,java.lang.String)>(v, v);
// Three pre-existing segments, all version "v0":
//   [0] index_old_1.zip, interval [100,150) UTC, NoneShardSpec (non-extendable)
//   [1] index_old_2.zip, interval [200,250) UTC, LinearShardSpec(0)
//   [2] index_old_3.zip, interval [250,300) UTC, LinearShardSpec(0)
v = newarray (org.apache.hive.druid.org.apache.druid.timeline.DataSegment)[3];
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index_old_1.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = staticinvoke <org.apache.hive.druid.org.apache.druid.timeline.partition.NoneShardSpec: org.apache.hive.druid.org.apache.druid.timeline.partition.NoneShardSpec instance()>();
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v[0] = v;
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index_old_2.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(200L, 250L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v[1] = v;
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index_old_3.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(250L, 300L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v0", v);
v[2] = v;
v = staticinvoke <java.util.Arrays: java.util.List asList(java.lang.Object[])>(v);
// HDFS pusher; storage directory taken from a Path's string form (cf. the sibling
// test that reads DRUID_SEGMENT_DIRECTORY from the conf).
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void <init>()>();
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
virtualinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig: void setStorageDirectory(java.lang.String)>(v);
v = new org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
v = <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper JSON_MAPPER>;
specialinvoke v.<org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusher: void <init>(org.apache.hive.druid.org.apache.druid.storage.hdfs.HdfsDataSegmentPusherConfig,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.com.fasterxml.jackson.databind.ObjectMapper)>(v, v, v);
// Publish the three existing segments; boolean arg is 1 (true) in bytecode.
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.conf.Configuration config>;
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: java.util.List publishSegmentsAndCommit(org.apache.hive.druid.org.apache.druid.metadata.SQLMetadataConnector,org.apache.hive.druid.org.apache.druid.metadata.MetadataStorageTablesConfig,java.lang.String,java.util.List,boolean,org.apache.hadoop.conf.Configuration,org.apache.hive.druid.org.apache.druid.segment.loading.DataSegmentPusher)>(v, v, "default.testName", v, 1, v, v);
// New segment targeting [100,150) UTC — the interval already held by the
// non-extendable NoneShardSpec segment — version "v1", LinearShardSpec(0).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "index.zip");
v = virtualinvoke v.<org.apache.hadoop.fs.Path: java.lang.String toString()>();
v = new org.joda.time.Interval;
v = <org.joda.time.DateTimeZone: org.joda.time.DateTimeZone UTC>;
specialinvoke v.<org.joda.time.Interval: void <init>(long,long,org.joda.time.DateTimeZone)>(100L, 150L, v);
v = new org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hive.druid.org.apache.druid.timeline.partition.LinearShardSpec: void <init>(java.lang.Integer)>(v);
v = specialinvoke v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hive.druid.org.apache.druid.timeline.DataSegment createSegment(java.lang.String,org.joda.time.Interval,java.lang.String,org.apache.hive.druid.org.apache.druid.timeline.partition.ShardSpec)>(v, v, "v1", v);
// Stage its descriptor, then attempt the INSERT INTO commit (overwrite = 0/false).
v = new org.apache.hadoop.fs.Path;
specialinvoke v.<org.apache.hadoop.fs.Path: void <init>(org.apache.hadoop.fs.Path,java.lang.String)>(v, "segmentsDescriptorDir");
v = staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: org.apache.hadoop.fs.Path makeSegmentDescriptorOutputPath(org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v);
staticinvoke <org.apache.hadoop.hive.druid.DruidStorageHandlerUtils: void writeSegmentDescriptor(org.apache.hadoop.fs.FileSystem,org.apache.hive.druid.org.apache.druid.timeline.DataSegment,org.apache.hadoop.fs.Path)>(v, v, v);
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.druid.DruidStorageHandler druidStorageHandler>;
v = v.<org.apache.hadoop.hive.druid.TestDruidStorageHandler: org.apache.hadoop.hive.metastore.api.Table tableMock>;
virtualinvoke v.<org.apache.hadoop.hive.druid.DruidStorageHandler: void commitInsertTable(org.apache.hadoop.hive.metastore.api.Table,boolean)>(v, 0);
return;
}
}