package org.apache.hadoop.hive.kafka;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
import org.apache.hadoop.hive.metastore.HiveMetaHook;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
import org.apache.hadoop.hive.ql.metadata.StorageHandlerInfo;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
import org.apache.hadoop.hive.ql.security.authorization.HiveCustomStorageHandlerUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Hive storage handler for Kafka-backed external tables. The handler also acts as its own
 * metastore hook: it extends DefaultHiveMetaHook and returns itself from getMetaHook().
 */
public class KafkaStorageHandler extends DefaultHiveMetaHook implements HiveStorageHandler {

  private static final Logger LOG = LoggerFactory.getLogger(KafkaStorageHandler.class);
  // Compile-time constants; javac inlined their values at the use sites. KAFKA_STORAGE_HANDLER is
  // recoverable from toString(); the value of KAFKA_PREFIX is assumed from the kafka:// scheme
  // built in getURIForAuth().
  private static final String KAFKA_STORAGE_HANDLER = "org.apache.hadoop.hive.kafka.KafkaStorageHandler";
  private static final String KAFKA_PREFIX = "kafka";

  private Configuration configuration;

  public Class getInputFormatClass() {
    return KafkaInputFormat.class;
  }

  public Class getOutputFormatClass() {
    return KafkaOutputFormat.class;
  }

  public Class getSerDeClass() {
    return KafkaSerDe.class;
  }

  public HiveMetaHook getMetaHook() {
    return this;
  }

  public HiveAuthorizationProvider getAuthorizationProvider() {
    return new DefaultHiveAuthorizationProvider();
  }

  public void configureInputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
    configureCommonProperties(tableDesc, jobProperties);
  }

  private void configureCommonProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
    String topic = tableDesc.getProperties()
        .getProperty(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), "");
    if (topic.isEmpty()) {
      throw new IllegalArgumentException(
          "Kafka topic missing, set table property -> " + KafkaTableProperties.HIVE_KAFKA_TOPIC.getName());
    }
    jobProperties.put(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), topic);
    String brokerString = tableDesc.getProperties()
        .getProperty(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), "");
    if (brokerString.isEmpty()) {
      throw new IllegalArgumentException(
          "Broker address missing, set table property -> " + KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName());
    }
    jobProperties.put(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), brokerString);
    // Copy the remaining table-level Kafka properties into the job properties. The filter
    // predicate and the copying consumer were compiled into the synthetic lambda classes
    // lambda_configureCommonProperties_0 and lambda_configureCommonProperties_1; their bodies
    // are not part of this listing.
    Arrays.stream(KafkaTableProperties.values())
        .filter(lambda_configureCommonProperties_0__40.bootstrap$())
        .forEach(lambda_configureCommonProperties_1__41.bootstrap$(jobProperties, tableDesc));
    if (jobProperties.get(KafkaTableProperties.WRITE_SEMANTIC_PROPERTY.getName())
        .equals(KafkaOutputFormat.WriteSemantic.EXACTLY_ONCE.name())) {
      // Exactly-once writes require readers to skip records from aborted transactions.
      jobProperties.put("kafka.consumer.isolation.level", "read_committed");
    }
  }

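  /*
   * Illustrative sketch, not part of this class: configureCommonProperties above rejects a table
   * unless both the topic and the broker list are set as table properties. The property keys are
   * whatever KafkaTableProperties.HIVE_KAFKA_TOPIC and HIVE_KAFKA_BOOTSTRAP_SERVERS resolve to;
   * in Hive's Kafka handler documentation they are "kafka.topic" and "kafka.bootstrap.servers",
   * for example (table, topic and broker names below are hypothetical):
   *
   *   CREATE EXTERNAL TABLE kafka_events (...)
   *   STORED BY 'org.apache.hadoop.hive.kafka.KafkaStorageHandler'
   *   TBLPROPERTIES ("kafka.topic" = "events", "kafka.bootstrap.servers" = "broker1:9092");
   */
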
  public void configureInputJobCredentials(TableDesc tableDesc, Map<String, String> secrets) {
  }

  public void configureOutputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
    configureCommonProperties(tableDesc, jobProperties);
  }

  public void configureTableJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
    configureInputJobProperties(tableDesc, jobProperties);
    configureOutputJobProperties(tableDesc, jobProperties);
  }

  public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
    Map<String, String> properties = new HashMap<>();
    configureInputJobProperties(tableDesc, properties);
    configureOutputJobProperties(tableDesc, properties);
    // Compiled as a bound method reference on the JobConf (synthetic class set__42),
    // i.e. effectively properties.forEach(jobConf::set).
    properties.forEach(jobConf::set);
    try {
      KafkaUtils.copyDependencyJars(jobConf, KafkaStorageHandler.class);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  public void setConf(Configuration configuration) {
    this.configuration = configuration;
  }

  public Configuration getConf() {
    return configuration;
  }

  public String toString() {
    return "org.apache.hadoop.hive.kafka.KafkaStorageHandler";
  }

  public StorageHandlerInfo getStorageHandlerInfo(Table table) throws MetaException {
    String topic = table.getParameters().get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName());
    if (topic == null || topic.isEmpty()) {
      throw new MetaException("topic is null or empty");
    }
    String brokers = table.getParameters().get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName());
    if (brokers == null || brokers.isEmpty()) {
      throw new MetaException("kafka brokers string is null or empty");
    }
    Properties properties = new Properties();
    properties.setProperty("key.deserializer", ByteArrayDeserializer.class.getName());
    properties.setProperty("value.deserializer", ByteArrayDeserializer.class.getName());
    properties.setProperty("bootstrap.servers", brokers);
    properties.setProperty("client.id", Utilities.getTaskId(getConf()));
    if (UserGroupInformation.isSecurityEnabled()) {
      KafkaUtils.addKerberosJaasConf(getConf(), properties);
    }
    // Forward selected table parameters to the consumer properties. The filter predicate and the
    // copying consumer were compiled into the synthetic lambda classes lambda_getStorageHandlerInfo_2
    // and lambda_getStorageHandlerInfo_3; their bodies are not part of this listing.
    table.getParameters().entrySet().stream()
        .filter(lambda_getStorageHandlerInfo_2__43.bootstrap$())
        .forEach(lambda_getStorageHandlerInfo_3__44.bootstrap$(properties));
    return new KafkaStorageHandlerInfo(topic, properties);
  }

  public URI getURIForAuth(Table table) throws URISyntaxException {
    Map<String, String> tableProperties = HiveCustomStorageHandlerUtils.getTableProperties(table);
    String brokerString = tableProperties.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName()) != null
        ? tableProperties.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName())
        : configuration.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName());
    Preconditions.checkNotNull(brokerString,
        "Set Table property " + KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName());
    String topic = tableProperties.get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName());
    Preconditions.checkNotNull(topic,
        "Set Table property " + KafkaTableProperties.HIVE_KAFKA_TOPIC.getName());
    return new URI("kafka://" + brokerString + "/" + topic);
  }

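  /*
   * Example of the authorization URI built above (values are hypothetical): with bootstrap
   * servers "broker1:9092,broker2:9092" and topic "events", getURIForAuth returns
   * kafka://broker1:9092,broker2:9092/events.
   */
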
  private Properties buildProducerProperties(Table table) {
    String brokers = table.getParameters().get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName());
    if (brokers == null || brokers.isEmpty()) {
      throw new RuntimeException("kafka brokers string is null or empty");
    }
    Properties properties = new Properties();
    properties.setProperty("key.serializer", ByteArraySerializer.class.getName());
    properties.setProperty("value.serializer", ByteArraySerializer.class.getName());
    properties.setProperty("bootstrap.servers", brokers);
    if (UserGroupInformation.isSecurityEnabled()) {
      KafkaUtils.addKerberosJaasConf(getConf(), properties);
    }
    // Forward selected table parameters to the producer properties; the filter predicate and the
    // copying consumer are the synthetic lambda classes lambda_buildProducerProperties_4 and
    // lambda_buildProducerProperties_5, whose bodies are not part of this listing.
    table.getParameters().entrySet().stream()
        .filter(lambda_buildProducerProperties_4__45.bootstrap$())
        .forEach(lambda_buildProducerProperties_5__46.bootstrap$(properties));
    return properties;
  }

  public LockType getLockType(WriteEntity writeEntity) {
    if (writeEntity.getWriteType().equals(WriteEntity.WriteType.INSERT)) {
      return LockType.SHARED_READ;
    }
    return LockType.SHARED_WRITE;
  }

  private String getQueryId() {
    return HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVE_QUERY_ID);
  }

  public void commitInsertTable(Table table, boolean overwrite) throws MetaException {
    boolean isExactlyOnce = table.getParameters()
        .get(KafkaTableProperties.WRITE_SEMANTIC_PROPERTY.getName())
        .equals(KafkaOutputFormat.WriteSemantic.EXACTLY_ONCE.name());
    boolean isTwoPhaseCommit = !Boolean.parseBoolean(
        table.getParameters().get(KafkaTableProperties.HIVE_KAFKA_OPTIMISTIC_COMMIT.getName()));
    if (!isExactlyOnce || !isTwoPhaseCommit) {
      // Nothing to finalize unless the insert used exactly-once semantics with a two-phase commit.
      return;
    }
    Path queryWorkingDir = getQueryWorkingDir(table);
    int maxTries = Integer.parseInt(
        table.getParameters().get(KafkaTableProperties.MAX_RETRIES.getName()));

    // The commit runs in four stages. The stage bodies were compiled as the anonymous inner
    // classes KafkaStorageHandler$1..$5 plus several synthetic lambda classes; only the captured
    // arguments and the RetryUtils wiring are visible in this listing.

    // Stage 1: fetch the transaction states recorded under the query working directory.
    Map transactionsMap;
    KafkaStorageHandler$1 fetchTransactionStates = new KafkaStorageHandler$1(this, queryWorkingDir);
    try {
      transactionsMap = (Map) RetryUtils.retry(fetchTransactionStates,
          lambda_commitInsertTable_6__47.bootstrap$(), maxTries);
    } catch (Exception e) {
      LOG.error("Can not fetch Transaction states due [{}]", e.getMessage());
      throw new MetaException(e.getMessage());
    }

    // Stage 2: build one producer per transaction and pre-check that the commit can go ahead.
    Properties producerProperties = buildProducerProperties(table);
    Map producersMap = new HashMap();
    KafkaStorageHandler$2 buildProducersTask =
        new KafkaStorageHandler$2(this, transactionsMap, producersMap, producerProperties);
    KafkaStorageHandler$3 cleanUpProducersMap = new KafkaStorageHandler$3(this, producersMap);
    try {
      RetryUtils.retry(buildProducersTask, lambda_commitInsertTable_7__48.bootstrap$(),
          cleanUpProducersMap, maxTries, "Error while Building Producers");
    } catch (Exception e) {
      LOG.error("Can not build producers due [{}]", e);
      throw new MetaException(e.getMessage());
    }

    // Stage 3: commit the transactions one by one; a failure here can leave partially committed data.
    Set committedTxn = new HashSet();
    KafkaStorageHandler$4 commitTask = new KafkaStorageHandler$4(this, producersMap, committedTxn);
    try {
      // The retry predicate argument reuses one of the lambdas above; the collapsed variable
      // names in the decompiled output do not say which one.
      RetryUtils.retry(commitTask, lambda_commitInsertTable_7__48.bootstrap$(), maxTries);
    } catch (Exception e) {
      producersMap.forEach(lambda_commitInsertTable_8__50.bootstrap$());
      LOG.error("Commit transaction failed", e);
      if (committedTxn.size() > 0) {
        LOG.error("Partial data got committed; some actions need to be done");
        committedTxn.stream().forEach(lambda_commitInsertTable_9__51.bootstrap$());
      }
      throw new MetaException(e.getMessage());
    }

    // Stage 4: clean up the query working directory; failures here are only logged.
    KafkaStorageHandler$5 cleanWorkingDirTask = new KafkaStorageHandler$5(this, queryWorkingDir);
    try {
      RetryUtils.retry(cleanWorkingDirTask, lambda_commitInsertTable_10__49.bootstrap$(), maxTries);
    } catch (Exception e) {
      LOG.error("Failed to clean Query Working Directory [{}] due to [{}]", queryWorkingDir, e.getMessage());
    }
  }

  public void preInsertTable(Table table, boolean overwrite) throws MetaException {
    if (overwrite) {
      throw new MetaException("Kafka Table does not support the overwrite SQL semantic");
    }
  }

  public void rollbackInsertTable(Table table, boolean overwrite) throws MetaException {
  }

  public void preCreateTable(Table table) throws MetaException {
    if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
      throw new MetaException(
          "org.apache.hadoop.hive.kafka.KafkaStorageHandler supports only " + TableType.EXTERNAL_TABLE);
    }
    // First pass presumably validates the mandatory table properties, the second presumably fills
    // in defaults for the remaining ones; the filter is a method reference to isMandatory
    // (synthetic holder isMandatory__52) and the two consumers are the synthetic lambda classes
    // lambda_preCreateTable_11 and lambda_preCreateTable_12, whose bodies are not part of this listing.
    Arrays.stream(KafkaTableProperties.values())
        .filter(KafkaTableProperties::isMandatory)
        .forEach(lambda_preCreateTable_11__53.bootstrap$(table));
    Arrays.stream(KafkaTableProperties.values())
        .forEach(lambda_preCreateTable_12__54.bootstrap$(table));
  }

  public void rollbackCreateTable(Table table) throws MetaException {
  }

  public void commitCreateTable(Table table) throws MetaException {
    commitInsertTable(table, false);
  }

  public void preDropTable(Table table) throws MetaException {
  }

  public void rollbackDropTable(Table table) throws MetaException {
  }

  public void commitDropTable(Table table, boolean deleteData) throws MetaException {
  }

  private Path getQueryWorkingDir(Table table) {
    return new Path(table.getSd().getLocation(), getQueryId());
  }

  private void cleanWorkingDirectory(Path queryWorkingDir) throws IOException {
    FileSystem fs = FileSystem.get(getConf());
    fs.delete(queryWorkingDir, true);
  }

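  /*
   * Illustrative layout (paths are hypothetical): getQueryWorkingDir resolves to
   * <table location>/<hive query id>, e.g.
   * hdfs://nn:8020/warehouse/kafka_table/hive_20240101000000_abc. cleanWorkingDirectory deletes
   * that directory recursively; the Stage 4 task in commitInsertTable captures the same
   * directory, presumably to invoke this cleanup after the commit.
   */
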
}
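
/*
 * Hook lifecycle sketch (assumed from DefaultHiveMetaHook semantics, not shown in this listing):
 * the metastore client invokes preCreateTable before a CREATE TABLE and commitCreateTable or
 * rollbackCreateTable after it, and the pre/commit/rollbackInsertTable callbacks around an
 * INSERT; commitCreateTable above simply reuses commitInsertTable(table, false).
 */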