public final enum class org.apache.hadoop.hive.conf.HiveConf$ConfVars extends java.lang.Enum
{
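// NOTE: enum constants only. Each ConfVars constant binds a "hive.*" property
// key, a default value and a description; those constructor arguments are
// elided from this dump. The "---" divider comments below are editorial
// annotations inferred from constant-name prefixes, not part of the class.

// --- Metastore-client (MSC) result cache and core execution directories ---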
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_MAX_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_RECORD_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRIPTWRAPPER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars PLAN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars STAGINGDIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRATCHDIR;
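// --- Replication (REPL): dump/load, change management (CM), external tables,
//     Ranger/Atlas metadata, snapshots and retry policy ---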
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLDIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMDIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMRETIAN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMENCRYPTEDDIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMFALLBACKNONENCRYPTEDDIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMINTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_FUNCTIONS_ROOT_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_APPROX_MAX_LOAD_TASKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_PARTITIONS_DUMP_PARALLELISM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RUN_DATA_COPY_TASKS_ON_TARGET;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_METADATA_ONLY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_PREV_DUMP_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_PREV_DUMP_DIR_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_MATERIALIZED_VIEWS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_SKIP_IMMUTABLE_DATA_COPY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_ACID_TABLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_DUMP_ABORT_WRITE_TXN_AFTER_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ADD_RAW_RESERVED_NAMESPACE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_EXTERNAL_TABLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_EXTERNAL_TABLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_TABLE_BASE_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK_PATHS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_AUTHORIZATION_METADATA;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_AUTHORIZATION_PROVIDER_SERVICE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_HANDLE_DENY_POLICY_TARGET;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REPL_FAILOVER_START;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_ADD_DENY_POLICY_TARGET;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_CLIENT_READ_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_ATLAS_METADATA;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_ENDPOINT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_REPLICATED_TO_DB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_CLIENT_READ_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_CLIENT_CONNECT_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SOURCE_CLUSTER_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_TARGET_CLUSTER_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_INTIAL_DELAY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_BACKOFF_COEFFICIENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_JITTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_MAX_DELAY_BETWEEN_RETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_TOTAL_DURATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_COPY_FILE_LIST_ITERATOR_RETRY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_LOAD_PARTITIONS_BATCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_LOAD_PARTITIONS_WITH_DATA_COPY_BATCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_PARALLEL_COPY_TASKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SNAPSHOT_DIFF_FOR_EXTERNAL_TABLE_COPY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SNAPSHOT_OVERWRITE_TARGET_FOR_EXTERNAL_TABLE_COPY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_STATS_TOP_EVENTS_COUNTS;
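// --- Scratch/resource directories, job submission, compression, reducer
//     sizing, exec hooks, parallel execution, dynamic partitioning ---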
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALSCRATCHDIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DOWNLOADED_RESOURCES_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRATCHDIRPERMISSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SUBMITVIACHILD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SUBMITLOCALTASKVIACHILD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRIPTERRORLIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars ALLOWPARTIALCONSUMP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars STREAMREPORTERPERFIX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars STREAMREPORTERENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSRESULT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATECODEC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATETYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars BYTESPERREDUCER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MAXREDUCERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars PREEXECHOOKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars POSTEXECHOOKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars ONFAILUREHOOKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars QUERYREDACTORHOOKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIENTSTATSPUBLISHERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars BASICSTATSTASKSMAXTHREADSFACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars EXECPARALLEL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars EXECPARALLETHREADNUMBER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESPECULATIVEEXECREDUCERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTERSPULLINTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONINGMODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONMAXPARTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONMAXPARTSPERNODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONCONVERT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MAXCREATEDFILES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DEFAULTPARTITIONNAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DEFAULT_ZOOKEEPER_PARTITION_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SHOW_JOB_FAIL_DEBUG_INFO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars JOB_DEBUG_CAPTURE_STACKTRACES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars JOB_DEBUG_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TASKLOG_DEBUG_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars OUTPUT_FILE_EXTENSION;
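// --- Test-only flags, local-mode auto switch, proto event logging, Hadoop
//     binaries and MapReduce split sizes ---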
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_ICEBERG;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_SSL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_REPL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_IDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TESTING_SHORT_LOGS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TESTING_REMOVE_LOGS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_HOSTNAMES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_UTILIZATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEZ_TEST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAPJOIN_TESTING_NO_HASH_TABLE_LOAD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_PARTIAL_MASKS_PATTERN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_PARTIAL_MASKS_REPLACEMENT_TEXT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_REPL_TEST_FILES_SORTED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEAUTO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEMAXBYTES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEMAXINPUTFILES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DROP_IGNORES_NON_EXISTENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEIGNOREMAPJOINHINT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_FILE_MAX_FOOTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_QUEUE_CAPACITY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_BASE_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_ROLLOVER_CHECK_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_CLEAN_FREQ;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_TTL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_FILE_PER_EVENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HADOOPBIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars YARNBIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDBIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_FS_HAR_IMPL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMAXSPLITSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZEPERNODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZEPERRACK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HADOOPNUMREDUCERS;
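// --- Metastore: warehouse location, Thrift connectivity and retries, client
//     caches, Kerberos/SASL/SSL, DataNucleus/ORM, direct SQL, event
//     listeners, aggregate-stats cache ---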
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREDBTYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREWAREHOUSE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_WAREHOUSE_EXTERNAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREURIS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESELECTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CAPABILITY_CHECK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CAPABILITIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_EXPIRY_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_INITIAL_CAPACITY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_MAX_CAPACITY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_STATS_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FASTPATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FS_HANDLER_THREADS_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_HBASE_FILE_METADATA_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_URI_RESOLVER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORETHRIFTCONNECTIONRETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORETHRIFTFAILURERETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SERVER_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CONNECT_RETRY_DELAY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_SOCKET_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_SOCKET_LIFETIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREPWD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORECONNECTURLHOOK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREMULTITHREADED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORECONNECTURLKEY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DBACCESS_SSL_PROPS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERATTEMPTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERINTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERFORCERELOADCONF;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMAXMESSAGESIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMINTHREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMAXTHREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TCP_KEEP_ALIVE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_WM_DEFAULT_POOL_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_ORIGINAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_ARCHIVED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_EXTRACTED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_KERBEROS_KEYTAB_FILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_KERBEROS_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_KERBEROS_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_SASL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_FRAMED_TRANSPORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_COMPACT_PROTOCOL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TOKEN_SIGNATURE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_PINOBJTYPES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_POOLING_TYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DATANUCLEUS_INIT_COL_INFO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_TABLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_COLUMNS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_CONSTRAINTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_STORE_MANAGER_TYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTO_CREATE_ALL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_VERIFICATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_INFO_CLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRANSACTION_ISOLATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_LEVEL2;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_LEVEL2_TYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_IDENTIFIER_FACTORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_LEGACY_VALUE_STRATEGY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_BATCH_RETRIEVE_MAX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_BATCH_RETRIEVE_OBJECTS_MAX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INIT_HOOKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PRE_EVENT_LISTENERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_LISTENERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRANSACTIONAL_EVENT_LISTENERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_DB_LISTENER_TTL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_DB_NOTIFICATION_API_AUTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_CLEAN_FREQ;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_EXPIRY_DURATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_MESSAGE_FACTORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EXECUTE_SET_UGI;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PARTITION_NAME_WHITELIST_PATTERN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INTEGER_JDO_PUSHDOWN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRY_DIRECT_SQL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRY_DIRECT_SQL_DDL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_LIMIT_PARTITION_REQUEST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars NEWTABLEDEFAULTPARA;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DDL_CTL_PARAMETERS_WHITELIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_RAW_STORE_IMPL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TXN_STORE_IMPL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_DRIVER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_MANAGER_FACTORY_CLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EXPRESSION_PROXY_CLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DETACH_ALL_ON_COMMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_NON_TRANSACTIONAL_READ;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_USER_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_END_FUNCTION_LISTENERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PART_INHERIT_TBL_PROPS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FILTER_HOOK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars FIRE_EVENTS_FOR_DML;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_FPP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_TTL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_METRICS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_USE_SSL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_KEYSTORE_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_KEYSTORE_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_TRUSTSTORE_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars METADATA_EXPORT_LOCATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MOVE_EXPORTED_METADATA_TO_TRASH;
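// --- CLI behaviour, session identity, added resources (files/jars/archives),
//     script environment and strict-mode checks ---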
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIIGNOREERRORS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIPRINTCURRENTDB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIPROMPT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_FS_HANDLER_CLS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESESSIONID;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESESSIONSILENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCAL_TIME_ZONE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_HISTORY_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYSTRING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYID;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYTAG;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOBNAMELENGTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJAR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEAUXJARS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVERELOADABLEJARS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDFILES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDJARS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDARCHIVES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDFILESUSEHDFSLOCATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CURRENT_DATABASE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVES_AUTO_PROGRESS_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTAUTOPROGRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTIDENVVAR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTTRUNCATEENV;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPT_ENV_BLACKLIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_NO_PARTITION_FILTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_TYPE_SAFETY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_CARTESIAN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_BUCKETING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_TIMESTAMP_CONVERSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DATA_OWNER;
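// --- Legacy strict mode, group-by, join and PTF runtime knobs ---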
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPREDMODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEALIAS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPSIDEAGGREGATE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEGROUPBYSKEW;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOINEMITINTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOINCACHESIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PUSH_RESIDUAL_INNER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_RANGECACHE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_VALUECACHE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_VALUECACHE_COLLECT_STATISTICS;
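// --- Cost-based optimizer (Calcite): fallback strategy, cost model, rule
//     exclusion, join/aggregate transposition ---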
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_FALLBACK_STRATEGY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_CNF_NODES_LIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_RETPATH_HIVEOP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_EXTENDED_COST_MODEL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_CPU;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_NET;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_LFS_WRITE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_LFS_READ;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_HDFS_WRITE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_HDFS_READ;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_RULE_EXCLUSION_REGEX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_SHOW_WARNINGS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CARDINALITY_PRESERVING_JOIN_OPTIMIZATION_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars AGGR_JOIN_TRANSPOSE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars AGGR_JOIN_TRANSPOSE_UNIQUE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SEMIJOIN_CONVERSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COLUMN_ALIGNMENT;
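// --- Materialized views (automatic rewriting, incremental rebuild) and JDBC
//     storage-handler pushdown ---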
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING_SQL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING_SUBQUERY_SQL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_FILE_FORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_SERDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENABLE_JDBC_PUSHDOWN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENABLE_JDBC_SAFE_PUSHDOWN;
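// --- Map join, hybrid grace hash join, hash-table sizing and map-side
//     aggregation memory; position aliases ---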
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINBUCKETCACHESIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINUSEOPTIMIZEDTABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINPARALELHASHTABLETHREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEHYBRIDGRACEHASHJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMINWBSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEWBSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINBLOOMFILTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINFULLOUTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESMBJOINCACHEROWS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEGROUPBYMAPINTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMEMORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRMEMORYTHRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMULTIGROUPBYSINGLEREDUCER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAP_GROUPBY_SORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DEFAULT_NULLS_LAST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_POSITION_ALIAS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORDERBY_POSITION_ALIAS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_ORDERBY_POSITION_ALIAS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NEW_JOB_GROUPING_SET_CARDINALITY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_LIMIT_EXTRASTEP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_COPYFILE_MAXNUMFILES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_COPYFILE_MAXSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUDTFAUTOPROGRESS;
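// --- Default file formats and SerDes, query history, schema evolution,
//     transactional scans and test-mode toggles ---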
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTFILEFORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTMANAGEDFILEFORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DEFAULT_STORAGE_HANDLER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYRESULTFILEFORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECHECKFILEFORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTRCFILESERDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTSERDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SERDESUSINGMETASTOREFORSCHEMA;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHISTORYFILELOC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_INCREMENTAL_PLAN_PROGRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTSERDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTRECORDREADER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTRECORDWRITER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTESCAPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEBINARYRECORDMAX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHADOOPMAXMEM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESMALLTABLESFILESIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEMA_EVOLUTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_FORCE_POSITIONAL_SCHEMA_EVOLUTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRANSACTIONAL_TABLE_SCAN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars FILTER_DELETE_EVENTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLERANDOMNUM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXIMTESTMODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEPREFIX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODESAMPLEFREQ;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODENOSAMPLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEDUMMYSTATAGGR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEDUMMYSTATPUB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTCURRENTTIMESTAMP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEROLLBACKTXN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILCOMPACTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILLOADDYNAMICPARTITION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILHEARTBEATER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TESTMODE_BUCKET_CODEC_VERSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXTEND_BUCKET_ID_RANGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEACIDKEYIDXSKIP;
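// --- Small-file merging (map-only / map-reduce / Tez) and RCFile options ---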
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPREDFILES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGETEZFILES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILESSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILESAVGSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGERCFILEBLOCKLEVEL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEORCFILESTRIPELEVEL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CODEC_POOL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEEXPLICITRCFILEHEADER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSERCFILESYNCCACHE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_RECORD_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_COLUMN_NUMBER_CONF;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_TOLERATE_CORRUPTIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_RECORD_BUFFER_SIZE;
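// --- Parquet and Avro date/time handling (proleptic Gregorian calendar,
//     legacy timestamp conversion, int64 timestamps) ---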
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars PARQUET_MEMORY_POOL_RATIO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_DATE_PROLEPTIC_GREGORIAN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_DATE_PROLEPTIC_GREGORIAN_DEFAULT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_LEGACY_CONVERSION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_WRITE_LEGACY_CONVERSION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_INFER_BINARY_AS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_PROLEPTIC_GREGORIAN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_PROLEPTIC_GREGORIAN_DEFAULT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_LEGACY_CONVERSION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_WRITE_LEGACY_CONVERSION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_WRITE_INT64_TIMESTAMP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_TIME_UNIT;
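// --- ORC: split strategy, footer/metadata caches, LLAP memory manager;
//     streaming auto-flush and heap-memory monitor ---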
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_BASE_DELTA_RATIO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_SPLIT_STRATEGY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_BLOB_STORAGE_SPLIT_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_WRITER_LLAP_MEMORY_MANAGER_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STREAMING_AUTO_FLUSH_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HEAP_MEMORY_MONITOR_USAGE_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STREAMING_AUTO_FLUSH_CHECK_INTERVAL_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLASSLOADER_SHADE_PREFIX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_MS_FOOTER_CACHE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_MS_FOOTER_CACHE_PPD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CACHE_USE_SOFT_REFERENCES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IO_SARG_CACHE_MAX_WEIGHT_MB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL;
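// --- Skew join, map-join conversion thresholds and limit optimizations ---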
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDYNAMICPARTITIONHASHJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINNOCONDITIONALTASK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONVERT_ANTI_JOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINUSENONSTAGED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINKEY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINMAPJOINNUMMAPTASK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINMAPJOINMINSPLIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESENDHEARTBEAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITMAXROWSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTLIMITFILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTENABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTMAXFETCH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITPUSHDOWNMEMORYUSAGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINMAXENTRIESHASHTABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars XPRODSMALLTABLEROWSTHRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINMAXSHUFFLESIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEKEYCOUNTADJUSTMENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLETHRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLELOADFACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEMAXMEMORYUSAGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLESCALE;
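// --- Input formats, Tez container settings, bucketing and sort-merge joins ---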
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEINPUTFORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZINPUTFORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZCONTAINERSIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZCPUVCORES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZJAVAOPTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZLOGLEVEL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZHS2USERACCESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYNAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZJOBNAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SYSLOG_INPUT_FORMAT_FILE_PRUNING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SYSLOG_INPUT_FORMAT_FILE_TIME_SLICE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEBUCKETINGSORTING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPARTITIONER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEENFORCESORTMERGEBUCKETMAPJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEENFORCEBUCKETMAPJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SORT_WHEN_BUCKETING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENFORCE_NOT_NULL_CONSTRAINT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_REDUCE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTOPERATORTRUST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEROWOFFSET;
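// --- Logical optimizer: predicate pushdown, point lookups, constant
//     propagation, deduplication, limit/top-N, shared work, CTE
//     materialization and BI sketch rewrites ---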
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTINDEXFILTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD_WINDOWING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPDRECOGNIZETRANSITIVITY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPD_RECOGNIZE_COLUMN_EQUALITIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPDREMOVEDUPLICATEFILTERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPOINTLOOKUPOPTIMIZER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPOINTLOOKUPOPTIMIZERMIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPT_TRANSFORM_IN_MAXNODES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTDISTINCTOPTIMIZER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPARTITIONCOLUMNSEPARATOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTCONSTANTPROPAGATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEIDENTITYPROJECTREMOVER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMETADATAONLYQUERIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVENULLSCANOPTIMIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD_STORAGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTGROUPBY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTBUCKETMAPJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTSORTMERGEBUCKETMAPJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTREDUCEDEDUPLICATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTREDUCEDEDUPLICATIONMINREDUCER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTJOINREDUCEDEDUPLICATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGFORORDERBY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGNUMBERFORORDERBY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGPERCENTFORORDERBY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REMOVE_ORDERBY_IN_SUBQUERY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEDISTINCTREWRITE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_UNION_REMOVE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTCORRELATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_CONSTRAINTS_JOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SORT_PREDS_WITH_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_REDUCE_WITH_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TOPNKEY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAX_TOPN_ALLOWED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_EFFICIENCY_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_EFFICIENCY_CHECK_BATCHES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_MAX_NUMBER_OF_PARTITIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_OPTIMIZATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_SEMIJOIN_OPTIMIZATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_MERGE_TS_SCHEMA;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_REUSE_MAPJOIN_CACHE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DPPUNION_OPTIMIZATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DPPUNION_MERGE_EVENTOPS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DOWNSTREAM_MERGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_PARALLEL_EDGE_SUPPORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REMOVE_SQ_COUNT_CHECK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE_LIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SCAN_PROBEDECODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_HMS_QUERY_CACHE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_VIEW_CACHE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTE_MATERIALIZE_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTE_MATERIALIZE_FULL_AGGREGATE_ONLY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_COUNTDISTINCT_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_COUNT_DISTINCT_SKETCH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_PERCENTILE_DISC_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_PERCENTILE_DISC_SKETCH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_CUME_DIST_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_CUME_DIST_SKETCH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_NTILE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_NTILE_SKETCH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_RANK_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_RANK_SKETCH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_ESTIMATE_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ESTIMATE_PERC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_JOIN_NDV_READJUSTMENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NUM_NULLS_ESTIMATE_PERC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSAUTOGATHER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSCOLAUTOGATHER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSDBCLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DEFAULT_PUBLISHER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DEFAULT_AGGREGATOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIENT_STATS_COUNTERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_RELIABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_PART_LEVEL_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_GATHER_NUM_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_TABLEKEYS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_SCANCOLS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ALGO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_FETCH_BITVECTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ERROR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_ESTIMATORS_ENABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_STATS_NDV_TUNER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_KEY_PREFIX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAX_VARIABLE_LENGTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_LIST_NUM_ENTRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAP_NUM_ENTRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_FETCH_COLUMN_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_JOIN_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_CORRELATED_MULTI_KEY_JOINS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_RANGE_SELECTIVITY_UNIFORM_DISTRIBUTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DESERIALIZATION_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_IN_CLAUSE_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_IN_MIN_RATIO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_UDTF_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_USE_BITVECTORS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAX_NUM_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_THRIFT_CLIENT_MAX_MESSAGE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SUPPORT_CONCURRENCY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_MANAGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_NUMRETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_UNLOCK_NUMRETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_SLEEP_BETWEEN_RETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_MAPRED_ONLY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_QUERY_STRING_MAX_LENGTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MM_ALLOW_ORIGINALS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_FILE_MOVE_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_USE_KERBEROS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_QUORUM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CLIENT_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SESSION_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_NAMESPACE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_ENABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_KEYSTORE_LOCATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_KEYSTORE_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_TRUSTSTORE_LOCATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_KILLQUERY_ENABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_KILLQUERY_NAMESPACE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MANAGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_EXT_LOCKING_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_STRICT_LOCKING_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_NONACID_READ_LOCKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_READ_LOCKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCKS_PARTITION_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_OVERWRITE_X_LOCK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_MERGE_INSERT_X_LOCK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_WRITE_X_LOCK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_STATS_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_DIR_CACHE_DURATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_WRITE_ACID_VERSION_FILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_READONLY_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_LOCKLESS_READS_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_CREATE_TABLE_USE_SUFFIX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_TRUNCATE_USE_BASE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_DROP_PARTITION_USE_BASE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_RENAME_PARTITION_MAKE_COPY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_MAX_CACHE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_REPORTING_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_DELTA_NUM_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_OBSOLETE_DELTA_NUM_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_DELTA_PCT_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_OPERATIONAL_PROPERTIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAX_OPEN_TXNS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COUNT_OPEN_TXNS_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MAX_OPEN_BATCH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_RETRYABLE_SQLEX_REGEX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_INITIATOR_ON;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WORKER_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WORKER_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CHECK_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_INITIATOR_DURATION_UPDATE_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_DURATION_UPDATE_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_REQUEST_QUEUE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELTA_NUM_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELTA_PCT_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_MAX_NUM_DELTA;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ABORTEDTXN_TIME_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ACTIVE_DELTA_DIR_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_OBSOLETE_DELTA_DIR_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_SMALL_DELTA_DIR_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ACID_METRICS_LOGGER_FREQUENCY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WAIT_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MR_COMPACTOR_GATHER_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_GATHER_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_INITIATOR_FAILED_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_RUN_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_RETENTION_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_THREADS_NUM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_JOB_QUEUE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TRANSACTIONAL_CONCATENATE_NOBLOCK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars CONCATENATE_EXTERNAL_TABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_COMPACT_MM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_CRUD_QUERY_BASED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SPLIT_GROUPING_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_SUCCEEDED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_FAILED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_ATTEMPTED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_REAPER_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TIMEDOUT_TXN_REAPER_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars WRITE_SET_REAPER_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MERGE_CARDINALITY_VIOLATION_CHECK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SPLIT_UPDATE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars MERGE_SPLIT_UPDATE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars OPTIMIZE_ACID_META_COLUMNS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_ROOT_ALLOCATOR_LIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_BATCH_ALLOCATOR_LIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_BATCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_INDEXING_GRANULARITY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_PARTITION_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_ROW_IN_MEMORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BROKER_DEFAULT_ADDRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_SELECT_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_NUM_HTTP_CONNECTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_HTTP_READ_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_SLEEP_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BASE_PERSIST_DIRECTORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_ROLLUP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_BASE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_TYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_USERNAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_URI;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_WORKING_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_TRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_PASSIVE_WAIT_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BITMAP_FACTORY_TYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_KERBEROS_ENABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_WAL_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_GENERATE_HFILES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_SNAPSHOT_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_SNAPSHOT_RESTORE_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_HBASE_URLENCODE_AUTHORIZATION_URI;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_KUDU_MASTER_ADDRESSES_DEFAULT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEARCHIVEENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCONVERSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCACHING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCONVERSIONTHRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKAGGR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEMETADATAQUERIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHOUTPUTSERDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXPREVALUATIONCACHE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEVARIABLESUBSTITUTE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEVARIABLESUBSTITUTEDEPTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONFVALIDATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SEMANTIC_ANALYZER_HOOK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_KERBEROS_USE_SHORTNAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_MANAGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHENTICATOR_MANAGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHORIZATION_MANAGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHORIZATION_AUTH_READS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHENTICATOR_MANAGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_USER_GRANTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TASK_FACTORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLES_ON_STORAGEHANDLERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_PRINT_HEADER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_PRINT_ESCAPE_CRLF;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_TEZ_SESSION_ASYNC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISABLE_UNSAFE_EXTERNALTABLE_OPERATIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_MANAGED_TABLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXTERNALTABLE_PURGE_DEFAULT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ERROR_ON_EMPTY_PARTITION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXIM_URI_SCHEME_WL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REPL_TASK_FACTORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_FILTER_TRANSACTIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REWORK_MAPREDWORK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IO_EXCEPTION_HANDLERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG4J_FILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_LOG4J_FILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_LOG_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT_TO_CONSOLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT_INCLUDE_EXTENDED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXPLAIN_USER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_CLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CODAHALE_METRICS_REPORTER_CLASSES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_REPORTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_JSON_FILE_LOCATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_JSON_FILE_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_HADOOP2_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_HADOOP2_COMPONENT_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PERF_LOGGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_START_CLEANUP_SCRATCHDIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCRATCH_DIR_LOCK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INSERT_INTO_MULTILEVEL_DIRS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTAS_EXTERNAL_TABLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INSERT_INTO_EXTERNAL_TABLES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEMPORARY_TABLE_STORAGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_LIFETIME_HOOKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRIVER_RUN_HOOKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DDL_OUTPUT_FORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENTITY_SEPARATOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CAPTURE_TRANSFORM_ENTITY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LINEAGE_INFO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SSL_PROTOCOL_BLACKLIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PRIVILEGE_SYNCHRONIZER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MAX_START_ATTEMPTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ZOOKEEPER_NAMESPACE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_PROXY_TRUSTHEADER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRANSPORT_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_BIND_HOST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_COMPILATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_COMPILE_LOCK_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_OPS_IN_SESSION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_REFRESH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_BIND_HOST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_SSL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_TYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_EXCLUDE_CIPHERSUITES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYMANAGERFACTORY_ALGORITHM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_SPNEGO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_PAM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_EXPLAIN_OUTPUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SHOW_GRAPH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_GRAPH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SHOW_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_ENABLE_CORS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_ORIGINS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_METHODS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_HEADERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_XFRAME_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_XFRAME_VALUE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SHOW_OPERATION_DRILLDOWN_LINK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ACTIVE_PASSIVE_HA_REGISTRY_NAMESPACE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_NAMESPACE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_WORKER_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_POOL_METRICS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_WM_AM_REGISTRY_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE_VALIDATOR_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_DEFAULT_QUEUES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_QUEUE_ACCESS_CHECK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_LIFETIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_RESTRICTED_CONFIGS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_LEVEL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_OPERATION_LOG_CLEANUP_DELAY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_OPERATION_LOG_PURGEPOLICY_TIMETOLIVE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_CHECK_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_FETCH_MAXBYTES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_IPADDRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER_IPADDRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_METRICS_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COMPRESSION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_SASL_QOP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_ASYNC_COMPILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LONG_POLLING_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_IMPL_CLASSNAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_DOMAIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_DOMAIN_USE_XFF_HEADER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ALLOW_USER_SUBSTITUTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_KERBEROS_KEYTAB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_KERBEROS_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLIENT_KERBEROS_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SPNEGO_KEYTAB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SPNEGO_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_URL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BASEDN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_DOMAIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERFILTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GUIDKEY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BIND_USER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BIND_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PAM_SERVICES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_URL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_SKIP_SSL_CERT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_KEYSTORE_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_KEYSTORE_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_PRIVATE_KEY_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_IDP_METADATA;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_SP_ID;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_FORCE_AUTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_AUTHENTICATION_LIFETIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_BLACKLISTED_SIGNATURE_ALGORITHMS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_ACS_INDEX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_CALLBACK_URL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_WANT_ASSERTIONS_SIGNED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_SIGN_REQUESTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_CALLBACK_TOKEN_TTL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_GROUP_ATTRIBUTE_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_GROUP_FILTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ENABLE_DOAS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SERVICE_USERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISTCP_DOAS_USER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TABLE_TYPE_MAPPING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SESSION_HOOK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_USE_SSL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_TYPE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYMANAGERFACTORY_ALGORITHM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_HTTP_EXCLUDE_CIPHERSUITES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_BINARY_INCLUDE_CIPHERSUITES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_BUILTIN_UDF_WHITELIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_BUILTIN_UDF_BLACKLIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ALLOW_UDF_LOAD_ON_DEMAND;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SESSION_CHECK_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_SESSION_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_OPERATION_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_USER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_PASSWORD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_XSRF_FILTER_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_COMMAND_WHITELIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_GRACEFUL_STOP_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MOVE_FILES_THREAD_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HDFS_ENCRYPTION_SHIM_CACHE_ON;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INFER_BUCKET_SORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTLISTBUCKETING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SERVER_READ_SOCKET_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars SERVER_TCP_KEEP_ALIVE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DECODE_PARTITION_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXECUTION_ENGINE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXECUTION_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JAR_DIRECTORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_USER_INSTALL_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MASKING_ALGO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_MAXENTRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_VECTORIZED_INPUT_FILE_FORMAT_EXCLUDES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_ROW_DESERIALIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTOR_ADAPTOR_USAGE_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_PTF_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_PTF_MAX_MEMORY_BUFFERING_BATCH_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_TESTING_REDUCER_BATCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_TESTING_REUSE_SCRATCH_COLUMNS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_COMPLEX_TYPES_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_COMPLEX_TYPES_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_CHECKED_EXPRESSIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_ADAPTOR_SUPPRESS_EVALUATE_EXCEPTIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_IF_EXPR_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZATION_ENABLED_OVERRIDE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZATION_SUPPRESS_EXPLAIN_EXECUTION_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZER_SUPPRESS_FATAL_EXCEPTIONS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_FILESINK_ARROW_NATIVE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TYPE_CHECK_ON_INSERT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HADOOP_CLASSPATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RPC_QUERY_PLAN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PLAN_MAPWORK_SERIALIZATION_SKIP_PROPERTIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AM_SPLIT_GENERATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SPLITS_AVAILABLE_SLOTS_CALCULATOR_CLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_GENERATE_CONSISTENT_SPLITS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PREWARM_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PREWARM_NUM_CONTAINERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTAGEIDREARRANGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEGOOGLEREGEXENGINE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTERGROUP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUOTEDID_SUPPORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars CREATE_TABLE_AS_EXTERNAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars CREATE_TABLES_AS_ACID;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CREATE_TABLES_AS_INSERT_ONLY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_DIRECT_INSERT_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_CTAS_X_LOCK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars USERS_IN_ADMIN_ROLE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_BMJ_USE_SUBCACHE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CHECK_CROSS_PRODUCT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_AUTO_REDUCER_PARALLELISM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAPREDUCE_OUTPUT_COMMITTER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAX_PARTITION_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MIN_PARTITION_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_OPTIMIZE_BUCKET_PRUNING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MIN_BLOOM_FILTER_ENTRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAX_BLOOM_FILTER_ENTRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BLOOM_FILTER_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BLOOM_FILTER_MERGE_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_MULTICOLUMN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_DPP_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SMB_NUMBER_WAVES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_EXEC_SUMMARY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SESSION_EVENTS_SUMMARY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_EXEC_INPLACE_PROGRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_INPLACE_PROGRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DAG_STATUS_CHECK_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_CONTAINER_MAX_JAVA_HEAP_FRACTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MAX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CACHE_ONLY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ROW_WRAPPER_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ACID_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_TRACE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_TRACE_ALWAYS_DUMP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_NONVECTOR_WRAPPER_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_MEMORY_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MIN_ALLOC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAX_ALLOC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_ARENA_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_MEMORY_MAX_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DIRECT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_PREALLOCATE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAPPED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAPPED_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DISCARD_METHOD;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DEFRAG_HEADROOM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAX_FORCE_EVICTED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TRACK_CACHE_USAGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_USE_LRFU;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_LAMBDA;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_HOTBUFFERS_PERCENTAGE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_BP_WRAPPER_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_ALLOW_SYNTHETIC_FILEID;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_DEFAULT_FS_FILE_ID;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_ENABLE_ORC_GAP_CACHE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_HYDRATION_STRATEGY_CLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_HYDRATION_SAVE_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_USE_FILEID_PATH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_FORMATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_ALLOC_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_VECTOR_SERDE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_VECTOR_SERDE_ASYNC_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_SLICE_ROW_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_SLICE_LRR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ORC_ENABLE_TIME_COUNTERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_VRB_QUEUE_LIMIT_MAX;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_VRB_QUEUE_LIMIT_MIN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CVB_BUFFERED_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_SWEEP_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_INSTANT_DEALLOC;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CACHE_DELETEDELTAS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PATH_CACHE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_SHARE_OBJECT_POOLS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ALLOW_UBER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_TREE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_VECTORIZED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_MAX_INPUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_MAX_OUTPUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SKIP_COMPILE_UDF_CHECK;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOW_PERMANENT_FNS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXECUTION_MODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ETL_SKIP_FORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_OBJECT_CACHE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_THREADPOOL_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_THREADPOOL_MULTIPLIER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_USE_KERBEROS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_KERBEROS_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_KERBEROS_KEYTAB_FILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEBUI_SPNEGO_KEYTAB_FILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEBUI_SPNEGO_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FS_KERBEROS_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FS_KERBEROS_KEYTAB_FILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZKSM_ZK_CONNECTION_STRING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZKSM_ZK_SESSION_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZK_REGISTRY_USER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZK_REGISTRY_NAMESPACE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SECURITY_ACL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SECURITY_ACL_DENY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_ACL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_ACL_DENY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_ACL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_ACL_DENY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_REMOTE_TOKEN_REQUIRES_SIGNING;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DELEGATION_TOKEN_LIFETIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_RPC_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEB_AUTO_AUTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_RPC_NUM_HANDLERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_RPC_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_RPC_NUM_HANDLERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_HDFS_PACKAGE_DIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WORK_DIRS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_YARN_SHUFFLE_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_YARN_CONTAINER_MB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_QUEUE_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_CONTAINER_ID;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NM_ADDRESS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_USE_FQDN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_EXEC_USE_FQDN;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NUM_EXECUTORS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_REPORTER_MAX_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_RPC_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_MEMORY_PER_INSTANCE_MB;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_XMX_HEADROOM;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_VCPUS_PER_INSTANCE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NUM_FILE_CLEANER_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FILE_CLEANUP_DELAY_SECONDS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SERVICE_HOSTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SERVICE_REFRESH_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_COMMUNICATOR_NUM_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_CLIENT_NUM_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_COLLECT_DAEMON_METRICS_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_COLLECT_DAEMON_METRICS_LISTENER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MINTASKS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MININTERVALDURATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_TASKTIMERATIO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_EXECUTORRATIO;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MAXNODES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_PRINCIPAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_KEYTAB_FILE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_PREEMPT_INDEPENDENT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_LOCALITY_DELAY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_TIMED_WINDOW_AVERAGE_DATA_POINTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_TIMED_WINDOW_AVERAGE_WINDOW_LENGTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_SIMPLE_AVERAGE_DATA_POINTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAX_CONCURRENT_REQUESTS_PER_NODE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_UMBILICAL_SERVER_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_SSL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_XFRAME_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_XFRAME_VALUE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CLIENT_CONSISTENT_SPLITS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SPLIT_LOCATION_PROVIDER_CLASS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_VALIDATE_ACLS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_SPLITS_TEMP_TABLE_STORAGE_FORMAT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_USE_HYBRID_CALENDAR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_DEPLOYMENT_SETUP_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_RPC_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_OUTPUT_SERVICE_PORT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_JWT_SHARED_SECRET_PROVIDER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_JWT_SHARED_SECRET;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ENABLE_GRACE_JOIN_IN_LLAP;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_HS2_ENABLE_COORDINATOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_LOGGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_OUTPUT_FORMAT_ARROW;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_COLLECT_LOCK_METRICS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_TIME_SUMMARY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRIGGER_VALIDATION_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars NWAYJOINREORDER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MERGE_NWAY_JOINS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_N_RECORDS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_PATH_VALIDATION;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_REPAIR_BATCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LLAP_CONCURRENT_QUERIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_ENABLE_MEMORY_MANAGER;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HASH_TABLE_INFLATION_FACTOR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_TRACE_ID;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MM_AVOID_GLOBSTATUS_ON_S3;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_RESTRICTED_LIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_HIDDEN_LIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_INTERNAL_VARIABLE_LIST;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_LENGTH;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_TIMEOUT_SECONDS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPUTE_SPLITS_NUM_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_INPUT_LISTING_MAX_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STRATEGIES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_PERSISTENCE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MAX_RETRYSNAPSHOT_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_REEXECUTION_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_CACHE_BATCH_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_CACHE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_PLANMAPPER_LINK_RELNODES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_RECOMPILATION_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_NAMESPACE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_IDLE_SLEEP_TIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_PROGRESS_REPORT_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_CREATE_AS_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_AUTHORIZATION_SCHEDULED_QUERIES_SUPPORTED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_MAX_EXECUTORS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_CLEANUP_SERVICE_THREAD_COUNT;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_CLEANUP_SERVICE_QUEUE_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_NONTRANSACTIONAL_TABLES_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_DIRECTORY;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_LIFETIME;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_SIZE;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NOTFICATION_EVENT_POLL_INTERVAL;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NOTFICATION_EVENT_CONSUMERS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DESCRIBE_PARTITIONED_TABLE_IGNORE_STATS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ICEBERG_METADATA_GENERATOR_THREADS;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_SUPPORTED_SCHEMES;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_USE_BLOBSTORE_AS_SCRATCHDIR;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED;
public static final enum org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_CONFIG_FILES;
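// Per-constant metadata follows: varname is the user-facing "hive.*" key, altName an
// optional legacy alias; exactly one of the typed default fields is meaningful, chosen
// by valType, and the unused ones hold sentinels (-1 / -1L / -1.0F / false / null).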
public final java.lang.String varname;
public final java.lang.String altName;
private final java.lang.String defaultExpr;
public final java.lang.String defaultStrVal;
public final int defaultIntVal;
public final long defaultLongVal;
public final float defaultFloatVal;
public final boolean defaultBoolVal;
private final java.lang.Class valClass;
private final org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType;
private final org.apache.hadoop.hive.conf.Validator validator;
private final java.lang.String description;
private final boolean excluded;
private final boolean caseSensitive;
private static final org.apache.hadoop.hive.conf.HiveConf$ConfVars[] $VALUES;
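// Compiler-generated enum plumbing: values() returns a defensive clone of $VALUES and
// valueOf(String) delegates to java.lang.Enum.valueOf.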
public static org.apache.hadoop.hive.conf.HiveConf$ConfVars[] values()
{
java.lang.Object v0;
org.apache.hadoop.hive.conf.HiveConf$ConfVars[] v1;
v1 = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars[] $VALUES>;
v0 = virtualinvoke v1.<java.lang.Object: java.lang.Object clone()>();
// checkcast assumed: the decompiler dropped the cast back to the array type
v1 = (org.apache.hadoop.hive.conf.HiveConf$ConfVars[]) v0;
return v1;
}
public static org.apache.hadoop.hive.conf.HiveConf$ConfVars valueOf(java.lang.String)
{
java.lang.String v0;
java.lang.Enum v1;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v2;
v0 := @parameter0: java.lang.String;
v1 = staticinvoke <java.lang.Enum: java.lang.Enum valueOf(java.lang.Class,java.lang.String)>(class "Lorg/apache/hadoop/hive/conf/HiveConf$ConfVars;", v0);
// checkcast assumed: Enum.valueOf returns java.lang.Enum, cast dropped by the decompiler
v2 = (org.apache.hadoop.hive.conf.HiveConf$ConfVars) v1;
return v2;
}
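// The private constructors below are overloads that fill in the omitted arguments
// (validator, caseSensitive, excluded, altName) and delegate to the canonical
// nine-argument constructor; the literal 1/0 arguments are Jimple's booleans.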
private void <init>(java.lang.String, int, java.lang.String, java.lang.Object, java.lang.String)
{
// (varname, defaultVal, description): locals renamed from the decompiler's collapsed `v`s
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3, v5;
int v2;
java.lang.Object v4;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 := @parameter1: int;
v3 := @parameter2: java.lang.String;
v4 := @parameter3: java.lang.Object;
v5 := @parameter4: java.lang.String;
specialinvoke v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean,boolean,java.lang.String)>(v1, v2, v3, v4, null, v5, 1, 0, null);
return;
}
private void <init>(java.lang.String, int, java.lang.String, java.lang.Object, java.lang.String, java.lang.String)
{
// (varname, defaultVal, description, altName): description/altName ordering assumed from the delegation pattern
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3, v5, v6;
int v2;
java.lang.Object v4;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 := @parameter1: int;
v3 := @parameter2: java.lang.String;
v4 := @parameter3: java.lang.Object;
v5 := @parameter4: java.lang.String;
v6 := @parameter5: java.lang.String;
specialinvoke v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean,boolean,java.lang.String)>(v1, v2, v3, v4, null, v5, 1, 0, v6);
return;
}
private void <init>(java.lang.String, int, java.lang.String, java.lang.Object, org.apache.hadoop.hive.conf.Validator, java.lang.String, java.lang.String)
{
// (varname, defaultVal, validator, description, altName)
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3, v6, v7;
int v2;
java.lang.Object v4;
org.apache.hadoop.hive.conf.Validator v5;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 := @parameter1: int;
v3 := @parameter2: java.lang.String;
v4 := @parameter3: java.lang.Object;
v5 := @parameter4: org.apache.hadoop.hive.conf.Validator;
v6 := @parameter5: java.lang.String;
v7 := @parameter6: java.lang.String;
specialinvoke v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean,boolean,java.lang.String)>(v1, v2, v3, v4, v5, v6, 1, 0, v7);
return;
}
private void <init>(java.lang.String, int, java.lang.String, java.lang.Object, java.lang.String, boolean)
{
// (varname, defaultVal, description, excluded)
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3, v5;
int v2;
java.lang.Object v4;
boolean v6;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 := @parameter1: int;
v3 := @parameter2: java.lang.String;
v4 := @parameter3: java.lang.Object;
v5 := @parameter4: java.lang.String;
v6 := @parameter5: boolean;
specialinvoke v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean,boolean,java.lang.String)>(v1, v2, v3, v4, null, v5, 1, v6, null);
return;
}
private void <init>(java.lang.String, int, java.lang.String, java.lang.String, boolean, java.lang.String)
{
// (varname, defaultVal, caseSensitive, description): mapping forced by the argument types
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3, v4, v6;
int v2;
boolean v5;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 := @parameter1: int;
v3 := @parameter2: java.lang.String;
v4 := @parameter3: java.lang.String;
v5 := @parameter4: boolean;
v6 := @parameter5: java.lang.String;
specialinvoke v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean,boolean,java.lang.String)>(v1, v2, v3, v4, null, v6, v5, 0, null);
return;
}
private void <init>(java.lang.String, int, java.lang.String, java.lang.Object, org.apache.hadoop.hive.conf.Validator, java.lang.String)
{
// (varname, defaultVal, validator, description)
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3, v6;
int v2;
java.lang.Object v4;
org.apache.hadoop.hive.conf.Validator v5;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 := @parameter1: int;
v3 := @parameter2: java.lang.String;
v4 := @parameter3: java.lang.Object;
v5 := @parameter4: org.apache.hadoop.hive.conf.Validator;
v6 := @parameter5: java.lang.String;
specialinvoke v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean,boolean,java.lang.String)>(v1, v2, v3, v4, v5, v6, 1, 0, null);
return;
}
private void <init>(java.lang.String, int, java.lang.String, java.lang.Object, org.apache.hadoop.hive.conf.Validator, java.lang.String, boolean)
{
// (varname, defaultVal, validator, description, excluded)
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3, v6;
int v2;
java.lang.Object v4;
org.apache.hadoop.hive.conf.Validator v5;
boolean v7;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 := @parameter1: int;
v3 := @parameter2: java.lang.String;
v4 := @parameter3: java.lang.Object;
v5 := @parameter4: org.apache.hadoop.hive.conf.Validator;
v6 := @parameter5: java.lang.String;
v7 := @parameter6: boolean;
specialinvoke v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean,boolean,java.lang.String)>(v1, v2, v3, v4, v5, v6, 1, v7, null);
return;
}
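// Canonical constructor: records the metadata, then dispatches on the runtime class of
// the default value (String/Integer/Long/Float/Boolean) to set valClass, valType and the
// matching typed default; any other type is rejected with IllegalArgumentException.
// Illustrative reading (assumed, not part of this dump): a constant declared with a
// String default takes the first branch, so valType == STRING and defaultStrVal holds
// the variable-substituted default text.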
private void <init>(java.lang.String, int, java.lang.String, java.lang.Object, org.apache.hadoop.hive.conf.Validator, java.lang.String, boolean, boolean, java.lang.String)
{
// locals and labels renamed from the decompiler's collapsed `v`/`label`; control flow is as in the original.
// Parameter-to-field mapping (caseSensitive before excluded) inferred from the delegating constructors' 1/0 defaults.
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3, v6, v9, v10, v13, v20;
int v2, v14;
java.lang.Object v4;
org.apache.hadoop.hive.conf.Validator v5;
boolean v7, v8, v11, v17;
org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType v12;
long v15;
float v16;
java.lang.IllegalArgumentException v18;
java.lang.Class v19;
java.lang.Integer v21;
java.lang.Long v22;
java.lang.Float v23;
java.lang.Boolean v24;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 := @parameter1: int;
v3 := @parameter2: java.lang.String;
v4 := @parameter3: java.lang.Object;
v5 := @parameter4: org.apache.hadoop.hive.conf.Validator;
v6 := @parameter5: java.lang.String;
v7 := @parameter6: boolean;
v8 := @parameter7: boolean;
v9 := @parameter8: java.lang.String;
specialinvoke v0.<java.lang.Enum: void <init>(java.lang.String,int)>(v1, v2);
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname> = v3;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator> = v5;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String description> = v6;
if v4 != null goto label0;
v10 = null;
goto label1;
label0:
v10 = staticinvoke <java.lang.String: java.lang.String valueOf(java.lang.Object)>(v4);
label1:
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String defaultExpr> = v10;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean excluded> = v8;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean caseSensitive> = v7;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String altName> = v9;
if v4 == null goto label2;
v11 = v4 instanceof java.lang.String;
if v11 == 0 goto label3;
label2:
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.Class valClass> = class "Ljava/lang/String;";
v12 = <org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType STRING>;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType> = v12;
// checkcast assumed: the decompiler dropped the (java.lang.String) cast of the default value
v13 = (java.lang.String) v4;
v13 = staticinvoke <org.apache.hadoop.hive.conf.SystemVariables: java.lang.String substitute(java.lang.String)>(v13);
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String defaultStrVal> = v13;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: int defaultIntVal> = -1;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: long defaultLongVal> = -1L;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: float defaultFloatVal> = -1.0F;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean defaultBoolVal> = 0;
goto label7;
label3:
v11 = v4 instanceof java.lang.Integer;
if v11 == 0 goto label4;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.Class valClass> = class "Ljava/lang/Integer;";
v12 = <org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType INT>;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType> = v12;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String defaultStrVal> = null;
v21 = (java.lang.Integer) v4;
v14 = virtualinvoke v21.<java.lang.Integer: int intValue()>();
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: int defaultIntVal> = v14;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: long defaultLongVal> = -1L;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: float defaultFloatVal> = -1.0F;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean defaultBoolVal> = 0;
goto label7;
label4:
v11 = v4 instanceof java.lang.Long;
if v11 == 0 goto label5;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.Class valClass> = class "Ljava/lang/Long;";
v12 = <org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType LONG>;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType> = v12;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String defaultStrVal> = null;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: int defaultIntVal> = -1;
v22 = (java.lang.Long) v4;
v15 = virtualinvoke v22.<java.lang.Long: long longValue()>();
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: long defaultLongVal> = v15;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: float defaultFloatVal> = -1.0F;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean defaultBoolVal> = 0;
goto label7;
label5:
v11 = v4 instanceof java.lang.Float;
if v11 == 0 goto label6;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.Class valClass> = class "Ljava/lang/Float;";
v12 = <org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType FLOAT>;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType> = v12;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String defaultStrVal> = null;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: int defaultIntVal> = -1;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: long defaultLongVal> = -1L;
v23 = (java.lang.Float) v4;
v16 = virtualinvoke v23.<java.lang.Float: float floatValue()>();
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: float defaultFloatVal> = v16;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean defaultBoolVal> = 0;
goto label7;
label6:
v11 = v4 instanceof java.lang.Boolean;
if v11 == 0 goto label8;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.Class valClass> = class "Ljava/lang/Boolean;";
v12 = <org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType BOOLEAN>;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType> = v12;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String defaultStrVal> = null;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: int defaultIntVal> = -1;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: long defaultLongVal> = -1L;
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: float defaultFloatVal> = -1.0F;
v24 = (java.lang.Boolean) v4;
v17 = virtualinvoke v24.<java.lang.Boolean: boolean booleanValue()>();
v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean defaultBoolVal> = v17;
goto label7;
label8:
v18 = new java.lang.IllegalArgumentException;
v19 = virtualinvoke v4.<java.lang.Object: java.lang.Class getClass()>();
v20 = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.Class,java.lang.String)>(v19, v3) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Not supported type value \u0001 for name \u0001");
specialinvoke v18.<java.lang.IllegalArgumentException: void <init>(java.lang.String)>(v20);
throw v18;
label7:
return;
}
public boolean isType(java.lang.String)
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1;
org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType v2;
boolean v3;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType>;
v3 = virtualinvoke v2.<org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: boolean isType(java.lang.String)>(v1);
return v3;
}
public org.apache.hadoop.hive.conf.Validator getValidator()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
org.apache.hadoop.hive.conf.Validator v1;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
return v1;
}
public java.lang.String validate(java.lang.String)
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v3;
org.apache.hadoop.hive.conf.Validator v2;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 := @parameter0: java.lang.String;
v2 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
if v2 != null goto label0;
v3 = null;
goto label1;
label0:
v2 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
v3 = interfaceinvoke v2.<org.apache.hadoop.hive.conf.Validator: java.lang.String validate(java.lang.String)>(v1);
label1:
return v3;
}
public java.lang.String validatorDescription()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
org.apache.hadoop.hive.conf.Validator v1;
java.lang.String v2;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
if v1 != null goto label0;
v2 = null;
goto label1;
label0:
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
v2 = interfaceinvoke v1.<org.apache.hadoop.hive.conf.Validator: java.lang.String toDescription()>();
label1:
return v2;
}
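// typeString() appends a "(TIME)" marker when a STRING-typed variable is actually a
// time duration guarded by a Validator$TimeValidator.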
public java.lang.String typeString()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType v1, v2;
org.apache.hadoop.hive.conf.Validator v3;
java.lang.String v4;
boolean v5;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType>;
v4 = virtualinvoke v1.<org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: java.lang.String typeString()>();
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType>;
v2 = <org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType STRING>;
if v1 != v2 goto label0;
v3 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
if v3 == null goto label0;
v3 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
v5 = v3 instanceof org.apache.hadoop.hive.conf.Validator$TimeValidator;
if v5 == 0 goto label0;
v4 = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v4) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u0001(TIME)");
label0:
return v4;
}
public java.lang.String getRawDescription()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String description>;
return v1;
}
public java.lang.String getDescription()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1, v2, v3;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = virtualinvoke v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String validatorDescription()>();
if v1 == null goto label0;
v2 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String description>;
v3 = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,java.lang.String)>(v1, v2) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u0001.\n\u0001");
return v3;
label0:
v2 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String description>;
return v2;
}
public boolean isExcluded()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
boolean v1;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean excluded>;
return v1;
}
public boolean isCaseSensitive()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
boolean v1;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: boolean caseSensitive>;
return v1;
}
public java.lang.String toString()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
return v1;
}
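// Launcher lookup helpers: each resolves the Hadoop home via findHadoopHome() and, when
// it is set, builds <home>/bin/{hadoop,yarn,mapred}; otherwise falls back to
// /usr/bin/hadoop or the bare "yarn"/"mapred" command names.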
private static java.lang.String findHadoopBinary()
{
java.lang.String v0, v1, v2, v3;
v0 = staticinvoke <org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String findHadoopHome()>();
if v0 != null goto label0;
v2 = <java.io.File: java.lang.String separator>;
v1 = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v2) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u0001usr");
goto label1;
label0:
v1 = v0;
label1:
v2 = <java.io.File: java.lang.String separator>;
v3 = <java.io.File: java.lang.String separator>;
v1 = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,java.lang.String,java.lang.String)>(v1, v2, v3) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u0001\u0001bin\u0001hadoop");
return v1;
}
private static java.lang.String findYarnBinary()
{
java.lang.String v0, v1, v2;
v0 = staticinvoke <org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String findHadoopHome()>();
if v0 != null goto label0;
v0 = "yarn";
goto label1;
label0:
v1 = <java.io.File: java.lang.String separator>;
v2 = <java.io.File: java.lang.String separator>;
v0 = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,java.lang.String,java.lang.String)>(v0, v1, v2) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u0001\u0001bin\u0001yarn");
label1:
return v0;
}
private static java.lang.String findMapRedBinary()
{
java.lang.String v0, v1, v2;
v0 = staticinvoke <org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String findHadoopHome()>();
if v0 != null goto label0;
v0 = "mapred";
goto label1;
label0:
v1 = <java.io.File: java.lang.String separator>;
v2 = <java.io.File: java.lang.String separator>;
v0 = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,java.lang.String,java.lang.String)>(v0, v1, v2) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u0001\u0001bin\u0001mapred");
label1:
return v0;
}
private static java.lang.String findHadoopHome()
{
java.lang.String v0;
v0 = staticinvoke <java.lang.System: java.lang.String getenv(java.lang.String)>("HADOOP_HOME");
if v0 != null goto label0;
v0 = staticinvoke <java.lang.System: java.lang.String getenv(java.lang.String)>("HADOOP_PREFIX");
label0:
return v0;
}
public java.lang.String getDefaultValue()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType v1;
java.lang.String v2;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType valType>;
v2 = virtualinvoke v1.<org.apache.hadoop.hive.conf.HiveConf$ConfVars$VarType: java.lang.String defaultValueString(org.apache.hadoop.hive.conf.HiveConf$ConfVars)>(v0);
return v2;
}
public java.lang.String getDefaultExpr()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
java.lang.String v1;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String defaultExpr>;
return v1;
}
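// getValidStringValues() is only meaningful when the validator is a Validator$StringSet;
// any other validator (or none) is reported as a RuntimeException.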
private java.util.Set getValidStringValues()
{
org.apache.hadoop.hive.conf.HiveConf$ConfVars v0;
org.apache.hadoop.hive.conf.Validator v1;
org.apache.hadoop.hive.conf.Validator$StringSet v2;
java.util.Set v3;
java.lang.RuntimeException v4;
java.lang.String v5, v6;
boolean v7;
v0 := @this: org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
if v1 == null goto label0;
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
v7 = v1 instanceof org.apache.hadoop.hive.conf.Validator$StringSet;
if v7 != 0 goto label1;
label0:
v4 = new java.lang.RuntimeException;
v5 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v6 = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v5) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u0001 does not specify a list of valid values");
specialinvoke v4.<java.lang.RuntimeException: void <init>(java.lang.String)>(v6);
throw v4;
label1:
v1 = v0.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.Validator validator>;
// checkcast assumed: the decompiler dropped the cast to Validator$StringSet
v2 = (org.apache.hadoop.hive.conf.Validator$StringSet) v1;
v3 = virtualinvoke v2.<org.apache.hadoop.hive.conf.Validator$StringSet: java.util.Set getExpected()>();
return v3;
}
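// Synthetic $values() initializer: fills the 1324-slot backing array in ordinal order.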
private static org.apache.hadoop.hive.conf.HiveConf$ConfVars[] $values()
{
// the decompiler emitted one collapsed `v` declaration per slot; two distinct locals suffice
org.apache.hadoop.hive.conf.HiveConf$ConfVars[] v0;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v1;
v = newarray (org.apache.hadoop.hive.conf.HiveConf$ConfVars)[1324];
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_ENABLED>;
v[0] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_MAX_SIZE>;
v[1] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_RECORD_STATS>;
v[2] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRIPTWRAPPER>;
v[3] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars PLAN>;
v[4] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars STAGINGDIR>;
v[5] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRATCHDIR>;
v[6] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLDIR>;
v[7] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMENABLED>;
v[8] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMDIR>;
v[9] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMRETIAN>;
v[10] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMENCRYPTEDDIR>;
v[11] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMFALLBACKNONENCRYPTEDDIR>;
v[12] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMINTERVAL>;
v[13] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE>;
v[14] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE_NAME>;
v[15] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_FUNCTIONS_ROOT_DIR>;
v[16] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_APPROX_MAX_LOAD_TASKS>;
v[17] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_PARTITIONS_DUMP_PARALLELISM>;
v[18] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RUN_DATA_COPY_TASKS_ON_TARGET>;
v[19] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_METADATA_ONLY>;
v[20] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_PREV_DUMP_DIR>;
v[21] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_PREV_DUMP_DIR_COUNT>;
v[22] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET>;
v[23] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_MATERIALIZED_VIEWS>;
v[24] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_SKIP_IMMUTABLE_DATA_COPY>;
v[25] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE>;
v[26] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_ACID_TABLES>;
v[27] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT>;
v[28] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_DUMP_ABORT_WRITE_TXN_AFTER_TIMEOUT>;
v[29] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ADD_RAW_RESERVED_NAMESPACE>;
v[30] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_EXTERNAL_TABLES>;
v[31] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_EXTERNAL_TABLES>;
v[32] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_TABLE_BASE_DIR>;
v[33] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK>;
v[34] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK_PATHS>;
v[35] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_AUTHORIZATION_METADATA>;
v[36] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_AUTHORIZATION_PROVIDER_SERVICE>;
v[37] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_HANDLE_DENY_POLICY_TARGET>;
v[38] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REPL_FAILOVER_START>;
v[39] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_ADD_DENY_POLICY_TARGET>;
v[40] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_CLIENT_READ_TIMEOUT>;
v[41] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_ATLAS_METADATA>;
v[42] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_ENDPOINT>;
v[43] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_REPLICATED_TO_DB>;
v[44] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_CLIENT_READ_TIMEOUT>;
v[45] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_CLIENT_CONNECT_TIMEOUT>;
v[46] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SOURCE_CLUSTER_NAME>;
v[47] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_TARGET_CLUSTER_NAME>;
v[48] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_INTIAL_DELAY>;
v[49] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_BACKOFF_COEFFICIENT>;
v[50] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_JITTER>;
v[51] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_MAX_DELAY_BETWEEN_RETRIES>;
v[52] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_TOTAL_DURATION>;
v[53] = v;
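v[49] through v[53] are the replication retry knobs. These are time- and float-typed, so a hedged sketch would go through getTimeVar and getFloatVar rather than getVar; note that REPL_RETRY_INTIAL_DELAY is referenced with exactly the spelling the constant is declared with in this dump:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class ReplRetryProbe {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Time-typed variables are resolved through getTimeVar with an explicit output unit.
        long initialDelaySec = conf.getTimeVar(HiveConf.ConfVars.REPL_RETRY_INTIAL_DELAY, TimeUnit.SECONDS);
        long totalDurationSec = conf.getTimeVar(HiveConf.ConfVars.REPL_RETRY_TOTAL_DURATION, TimeUnit.SECONDS);
        float backoff = conf.getFloatVar(HiveConf.ConfVars.REPL_RETRY_BACKOFF_COEFFICIENT);
        System.out.printf("initialDelay=%ds backoff=%.2f totalDuration=%ds%n",
                initialDelaySec, backoff, totalDurationSec);
    }
}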
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_COPY_FILE_LIST_ITERATOR_RETRY>;
v[54] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_LOAD_PARTITIONS_BATCH_SIZE>;
v[55] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_LOAD_PARTITIONS_WITH_DATA_COPY_BATCH_SIZE>;
v[56] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_PARALLEL_COPY_TASKS>;
v[57] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SNAPSHOT_DIFF_FOR_EXTERNAL_TABLE_COPY>;
v[58] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SNAPSHOT_OVERWRITE_TARGET_FOR_EXTERNAL_TABLE_COPY>;
v[59] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_STATS_TOP_EVENTS_COUNTS>;
v[60] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALSCRATCHDIR>;
v[61] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DOWNLOADED_RESOURCES_DIR>;
v[62] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRATCHDIRPERMISSION>;
v[63] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SUBMITVIACHILD>;
v[64] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SUBMITLOCALTASKVIACHILD>;
v[65] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRIPTERRORLIMIT>;
v[66] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars ALLOWPARTIALCONSUMP>;
v[67] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars STREAMREPORTERPERFIX>;
v[68] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars STREAMREPORTERENABLED>;
v[69] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSRESULT>;
v[70] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATE>;
v[71] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATECODEC>;
v[72] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATETYPE>;
v[73] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars BYTESPERREDUCER>;
v[74] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAXREDUCERS>;
v[75] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars PREEXECHOOKS>;
v[76] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars POSTEXECHOOKS>;
v[77] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars ONFAILUREHOOKS>;
v[78] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars QUERYREDACTORHOOKS>;
v[79] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIENTSTATSPUBLISHERS>;
v[80] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars BASICSTATSTASKSMAXTHREADSFACTOR>;
v[81] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars EXECPARALLEL>;
v[82] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars EXECPARALLETHREADNUMBER>;
v[83] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESPECULATIVEEXECREDUCERS>;
v[84] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTERSPULLINTERVAL>;
v[85] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONING>;
v[86] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONINGMODE>;
v[87] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONMAXPARTS>;
v[88] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONMAXPARTSPERNODE>;
v[89] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONCONVERT>;
v[90] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAXCREATEDFILES>;
v[91] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DEFAULTPARTITIONNAME>;
v[92] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DEFAULT_ZOOKEEPER_PARTITION_NAME>;
v[93] = v;
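v[86] through v[93] cover dynamic partitioning. A small sketch of toggling the feature and reading its limits, assuming the usual HiveConf setters/getters; the mode string "nonstrict" is one of the two accepted values for DYNAMICPARTITIONINGMODE:

import org.apache.hadoop.hive.conf.HiveConf;

public class DynamicPartitionProbe {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true);
        conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
        int maxParts = conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS);
        int maxPartsPerNode = conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE);
        System.out.printf("max dynamic partitions: %d total, %d per node%n", maxParts, maxPartsPerNode);
    }
}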
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SHOW_JOB_FAIL_DEBUG_INFO>;
v[94] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars JOB_DEBUG_CAPTURE_STACKTRACES>;
v[95] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars JOB_DEBUG_TIMEOUT>;
v[96] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TASKLOG_DEBUG_TIMEOUT>;
v[97] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars OUTPUT_FILE_EXTENSION>;
v[98] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST>;
v[99] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_ICEBERG>;
v[100] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_SSL>;
v[101] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_REPL>;
v[102] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_IDE>;
v[103] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TESTING_SHORT_LOGS>;
v[104] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TESTING_REMOVE_LOGS>;
v[105] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_HOSTNAMES>;
v[106] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_INTERVAL>;
v[107] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_UTILIZATION>;
v[108] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEZ_TEST>;
v[109] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAPJOIN_TESTING_NO_HASH_TABLE_LOAD>;
v[110] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_PARTIAL_MASKS_PATTERN>;
v[111] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_PARTIAL_MASKS_REPLACEMENT_TEXT>;
v[112] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_REPL_TEST_FILES_SORTED>;
v[113] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEAUTO>;
v[114] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEMAXBYTES>;
v[115] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEMAXINPUTFILES>;
v[116] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DROP_IGNORES_NON_EXISTENT>;
v[117] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEIGNOREMAPJOINHINT>;
v[118] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_FILE_MAX_FOOTER>;
v[119] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES>;
v[120] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_QUEUE_CAPACITY>;
v[121] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_BASE_PATH>;
v[122] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_ROLLOVER_CHECK_INTERVAL>;
v[123] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_CLEAN_FREQ>;
v[124] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_TTL>;
v[125] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_FILE_PER_EVENT>;
v[126] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HADOOPBIN>;
v[127] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars YARNBIN>;
v[128] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDBIN>;
v[129] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_FS_HAR_IMPL>;
v[130] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMAXSPLITSIZE>;
v[131] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZE>;
v[132] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZEPERNODE>;
v[133] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZEPERRACK>;
v[134] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HADOOPNUMREDUCERS>;
v[135] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREDBTYPE>;
v[136] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREWAREHOUSE>;
v[137] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_WAREHOUSE_EXTERNAL>;
v[138] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREURIS>;
v[139] = v;
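v[139] is METASTOREURIS, the variable that points clients at a remote metastore. A hedged sketch of wiring it up, assuming the Thrift-based HiveMetaStoreClient (whose constructor signature varies somewhat across Hive versions); the endpoint is a placeholder, not a real host:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

public class MetastoreConnect {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // "thrift://metastore-host:9083" is a placeholder endpoint.
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://metastore-host:9083");
        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
            System.out.println("databases: " + client.getAllDatabases());
        } finally {
            client.close();
        }
    }
}

HiveConf extends Hadoop's Configuration, which is why the same object can carry both Hive-specific and generic Hadoop settings into the client.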
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESELECTION>;
v[140] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CAPABILITY_CHECK>;
v[141] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CAPABILITIES>;
v[142] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_ENABLED>;
v[143] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_EXPIRY_TIME>;
v[144] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_INITIAL_CAPACITY>;
v[145] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_MAX_CAPACITY>;
v[146] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_STATS_ENABLED>;
v[147] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FASTPATH>;
v[148] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FS_HANDLER_THREADS_COUNT>;
v[149] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_HBASE_FILE_METADATA_THREADS>;
v[150] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_URI_RESOLVER>;
v[151] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORETHRIFTCONNECTIONRETRIES>;
v[152] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORETHRIFTFAILURERETRIES>;
v[153] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SERVER_PORT>;
v[154] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CONNECT_RETRY_DELAY>;
v[155] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_SOCKET_TIMEOUT>;
v[156] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_SOCKET_LIFETIME>;
v[157] = v;
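v[152] through v[157] are the metastore client connection and socket knobs, a mix of int- and time-typed variables. A minimal probe under the same assumptions as above:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class MetastoreClientTimeouts {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        int connectRetries = conf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES);
        long retryDelaySec = conf.getTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
        long socketTimeoutSec = conf.getTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.SECONDS);
        System.out.printf("retries=%d retryDelay=%ds socketTimeout=%ds%n",
                connectRetries, retryDelaySec, socketTimeoutSec);
    }
}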
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREPWD>;
v[158] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORECONNECTURLHOOK>;
v[159] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREMULTITHREADED>;
v[160] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORECONNECTURLKEY>;
v[161] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DBACCESS_SSL_PROPS>;
v[162] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERATTEMPTS>;
v[163] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERINTERVAL>;
v[164] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERFORCERELOADCONF>;
v[165] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMAXMESSAGESIZE>;
v[166] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMINTHREADS>;
v[167] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMAXTHREADS>;
v[168] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TCP_KEEP_ALIVE>;
v[169] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_WM_DEFAULT_POOL_SIZE>;
v[170] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_ORIGINAL>;
v[171] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_ARCHIVED>;
v[172] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_EXTRACTED>;
v[173] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_KERBEROS_KEYTAB_FILE>;
v[174] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_KERBEROS_PRINCIPAL>;
v[175] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_KERBEROS_PRINCIPAL>;
v[176] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_SASL>;
v[177] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_FRAMED_TRANSPORT>;
v[178] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_COMPACT_PROTOCOL>;
v[179] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TOKEN_SIGNATURE>;
v[180] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS>;
v[181] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR>;
v[182] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE>;
v[183] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL>;
v[184] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_PINOBJTYPES>;
v[185] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_POOLING_TYPE>;
v[186] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS>;
v[187] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DATANUCLEUS_INIT_COL_INFO>;
v[188] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_TABLES>;
v[189] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_COLUMNS>;
v[190] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_CONSTRAINTS>;
v[191] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_STORE_MANAGER_TYPE>;
v[192] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTO_CREATE_ALL>;
v[193] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_VERIFICATION>;
v[194] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION>;
v[195] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_INFO_CLASS>;
v[196] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRANSACTION_ISOLATION>;
v[197] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_LEVEL2>;
v[198] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_LEVEL2_TYPE>;
v[199] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_IDENTIFIER_FACTORY>;
v[200] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_LEGACY_VALUE_STRATEGY>;
v[201] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK>;
v[202] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_BATCH_RETRIEVE_MAX>;
v[203] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_BATCH_RETRIEVE_OBJECTS_MAX>;
v[204] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INIT_HOOKS>;
v[205] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PRE_EVENT_LISTENERS>;
v[206] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_LISTENERS>;
v[207] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE>;
v[208] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRANSACTIONAL_EVENT_LISTENERS>;
v[209] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES>;
v[210] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL>;
v[211] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_DB_LISTENER_TTL>;
v[212] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_DB_NOTIFICATION_API_AUTH>;
v[213] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS>;
v[214] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK>;
v[215] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_CLEAN_FREQ>;
v[216] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_EXPIRY_DURATION>;
v[217] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_MESSAGE_FACTORY>;
v[218] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EXECUTE_SET_UGI>;
v[219] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PARTITION_NAME_WHITELIST_PATTERN>;
v[220] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INTEGER_JDO_PUSHDOWN>;
v[221] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRY_DIRECT_SQL>;
v[222] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE>;
v[223] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRY_DIRECT_SQL_DDL>;
v[224] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH>;
v[225] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE>;
v[226] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE>;
v[227] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS>;
v[228] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES>;
v[229] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_LIMIT_PARTITION_REQUEST>;
v[230] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NEWTABLEDEFAULTPARA>;
v[231] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DDL_CTL_PARAMETERS_WHITELIST>;
v[232] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_RAW_STORE_IMPL>;
v[233] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TXN_STORE_IMPL>;
v[234] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_DRIVER>;
v[235] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_MANAGER_FACTORY_CLASS>;
v[236] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EXPRESSION_PROXY_CLASS>;
v[237] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DETACH_ALL_ON_COMMIT>;
v[238] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_NON_TRANSACTIONAL_READ>;
v[239] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_USER_NAME>;
v[240] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_END_FUNCTION_LISTENERS>;
v[241] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PART_INHERIT_TBL_PROPS>;
v[242] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FILTER_HOOK>;
v[243] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars FIRE_EVENTS_FOR_DML>;
v[244] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS>;
v[245] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_ENABLED>;
v[246] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_SIZE>;
v[247] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS>;
v[248] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_FPP>;
v[249] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE>;
v[250] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_TTL>;
v[251] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT>;
v[252] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT>;
v[253] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL>;
v[254] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL>;
v[255] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_METRICS>;
v[256] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_USE_SSL>;
v[257] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_KEYSTORE_PATH>;
v[258] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_KEYSTORE_PASSWORD>;
v[259] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_TRUSTSTORE_PATH>;
v[260] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD>;
v[261] = v;
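v[257] through v[261] configure TLS between client and metastore. The sketch below also shows the public varname field, which maps each enum constant to its hive-site.xml property name; the truststore path is a placeholder, and in practice the password is better kept in a Hadoop credential provider than set programmatically:

import org.apache.hadoop.hive.conf.HiveConf;

public class MetastoreSslSetup {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(HiveConf.ConfVars.HIVE_METASTORE_USE_SSL, true);
        conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH, "/etc/hive/conf/truststore.jks");
        // varname is the string key a ConfVars constant is bound to in hive-site.xml.
        System.out.println(HiveConf.ConfVars.HIVE_METASTORE_USE_SSL.varname + " = "
                + conf.getBoolVar(HiveConf.ConfVars.HIVE_METASTORE_USE_SSL));
    }
}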
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METADATA_EXPORT_LOCATION>;
v[262] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MOVE_EXPORTED_METADATA_TO_TRASH>;
v[263] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIIGNOREERRORS>;
v[264] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIPRINTCURRENTDB>;
v[265] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIPROMPT>;
v[266] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_FS_HANDLER_CLS>;
v[267] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESESSIONID>;
v[268] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESESSIONSILENT>;
v[269] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCAL_TIME_ZONE>;
v[270] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_HISTORY_ENABLED>;
v[271] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYSTRING>;
v[272] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYID>;
v[273] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYTAG>;
v[274] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOBNAMELENGTH>;
v[275] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJAR>;
v[276] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEAUXJARS>;
v[277] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVERELOADABLEJARS>;
v[278] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDFILES>;
v[279] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDJARS>;
v[280] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDARCHIVES>;
v[281] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDFILESUSEHDFSLOCATION>;
v[282] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CURRENT_DATABASE>;
v[283] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVES_AUTO_PROGRESS_TIMEOUT>;
v[284] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTAUTOPROGRESS>;
v[285] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTIDENVVAR>;
v[286] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTTRUNCATEENV>;
v[287] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPT_ENV_BLACKLIST>;
v[288] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT>;
v[289] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_NO_PARTITION_FILTER>;
v[290] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_TYPE_SAFETY>;
v[291] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_CARTESIAN>;
v[292] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_BUCKETING>;
v[293] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_TIMESTAMP_CONVERSION>;
v[294] = v;
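v[289] through v[294] are the strict-mode safety checks, all boolean-typed. A minimal sketch reading a few of them; names other than the ConfVars constants are illustrative:

import org.apache.hadoop.hive.conf.HiveConf;

public class StrictChecksProbe {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        boolean noCartesian = conf.getBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_CARTESIAN);
        boolean orderByNeedsLimit = conf.getBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT);
        boolean typeSafety = conf.getBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY);
        System.out.printf("cartesian=%s orderbyNoLimit=%s typeSafety=%s%n",
                noCartesian, orderByNeedsLimit, typeSafety);
    }
}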
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DATA_OWNER>;
v[295] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPREDMODE>;
v[296] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEALIAS>;
v[297] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPSIDEAGGREGATE>;
v[298] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEGROUPBYSKEW>;
v[299] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS>;
v[300] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOINEMITINTERVAL>;
v[301] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOINCACHESIZE>;
v[302] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PUSH_RESIDUAL_INNER>;
v[303] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_RANGECACHE_SIZE>;
v[304] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_VALUECACHE_SIZE>;
v[305] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_VALUECACHE_COLLECT_STATISTICS>;
v[306] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_ENABLED>;
v[307] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_FALLBACK_STRATEGY>;
v[308] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_CNF_NODES_LIMIT>;
v[309] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_RETPATH_HIVEOP>;
v[310] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_EXTENDED_COST_MODEL>;
v[311] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_CPU>;
v[312] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_NET>;
v[313] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_LFS_WRITE>;
v[314] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_LFS_READ>;
v[315] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_HDFS_WRITE>;
v[316] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_HDFS_READ>;
v[317] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_RULE_EXCLUSION_REGEX>;
v[318] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_SHOW_WARNINGS>;
v[319] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS>;
v[320] = v;
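v[307] through v[320] are the cost-based optimizer (Calcite CBO) switches and cost-model weights. A hedged probe of the master toggle and two related knobs, assuming their declared int/boolean types:

import org.apache.hadoop.hive.conf.HiveConf;

public class CboProbe {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        boolean cbo = conf.getBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED);
        int cnfNodeLimit = conf.getIntVar(HiveConf.ConfVars.HIVE_CBO_CNF_NODES_LIMIT);
        boolean showWarnings = conf.getBoolVar(HiveConf.ConfVars.HIVE_CBO_SHOW_WARNINGS);
        System.out.printf("cbo=%s cnfNodeLimit=%d showWarnings=%s%n", cbo, cnfNodeLimit, showWarnings);
    }
}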
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CARDINALITY_PRESERVING_JOIN_OPTIMIZATION_FACTOR>;
v[321] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars AGGR_JOIN_TRANSPOSE>;
v[322] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars AGGR_JOIN_TRANSPOSE_UNIQUE>;
v[323] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SEMIJOIN_CONVERSION>;
v[324] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COLUMN_ALIGNMENT>;
v[325] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING>;
v[326] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING_SQL>;
v[327] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING_SUBQUERY_SQL>;
v[328] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY>;
v[329] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW>;
v[330] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL>;
v[331] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL>;
v[332] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL_FACTOR>;
v[333] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_FILE_FORMAT>;
v[334] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_SERDE>;
v[335] = v;
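v[326] through v[335] govern materialized-view query rewriting. The rewriting time window is time-typed (how stale a view may be and still be used for rewriting), so it goes through getTimeVar; a minimal sketch:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class MaterializedViewProbe {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        boolean autoRewrite = conf.getBoolVar(HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING);
        long staleWindowMin = conf.getTimeVar(
                HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW, TimeUnit.MINUTES);
        System.out.printf("autoRewrite=%s rewritingTimeWindow=%dmin%n", autoRewrite, staleWindowMin);
    }
}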
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENABLE_JDBC_PUSHDOWN>;
v[336] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENABLE_JDBC_SAFE_PUSHDOWN>;
v[337] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINBUCKETCACHESIZE>;
v[338] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINUSEOPTIMIZEDTABLE>;
v[339] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT>;
v[340] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINPARALELHASHTABLETHREADS>;
v[341] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEHYBRIDGRACEHASHJOIN>;
v[342] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ>;
v[343] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMINWBSIZE>;
v[344] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS>;
v[345] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEWBSIZE>;
v[346] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINBLOOMFILTER>;
v[347] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINFULLOUTER>;
v[348] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE>;
v[349] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESMBJOINCACHEROWS>;
v[350] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEGROUPBYMAPINTERVAL>;
v[351] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMEMORY>;
v[352] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY>;
v[353] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRMEMORYTHRESHOLD>;
v[354] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTION>;
v[355] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND>;
v[356] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST>;
v[357] = v;
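v[351] through v[357] tune map-side aggregation: the hash-table memory fraction and the minimum reduction ratio below which map-side aggregation is abandoned. Both ratios are float-typed; a minimal probe:

import org.apache.hadoop.hive.conf.HiveConf;

public class MapAggrProbe {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        boolean mapAggr = conf.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE);
        float hashMemFraction = conf.getFloatVar(HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
        float minReduction = conf.getFloatVar(HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION);
        System.out.printf("mapAggr=%s hashMem=%.2f minReduction=%.2f%n",
                mapAggr, hashMemFraction, minReduction);
    }
}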
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMULTIGROUPBYSINGLEREDUCER>;
v[358] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAP_GROUPBY_SORT>;
v[359] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DEFAULT_NULLS_LAST>;
v[360] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_POSITION_ALIAS>;
v[361] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORDERBY_POSITION_ALIAS>;
v[362] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_ORDERBY_POSITION_ALIAS>;
v[363] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NEW_JOB_GROUPING_SET_CARDINALITY>;
v[364] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_LIMIT_EXTRASTEP>;
v[365] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_COPYFILE_MAXNUMFILES>;
v[366] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_COPYFILE_MAXSIZE>;
v[367] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUDTFAUTOPROGRESS>;
v[368] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTFILEFORMAT>;
v[369] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTMANAGEDFILEFORMAT>;
v[370] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DEFAULT_STORAGE_HANDLER>;
v[371] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYRESULTFILEFORMAT>;
v[372] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECHECKFILEFORMAT>;
v[373] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTRCFILESERDE>;
v[374] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTSERDE>;
v[375] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SERDESUSINGMETASTOREFORSCHEMA>;
v[376] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES>;
v[377] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHISTORYFILELOC>;
v[378] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_INCREMENTAL_PLAN_PROGRESS>;
v[379] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL>;
v[380] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTSERDE>;
v[381] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTRECORDREADER>;
v[382] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTRECORDWRITER>;
v[383] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTESCAPE>;
v[384] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEBINARYRECORDMAX>;
v[385] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHADOOPMAXMEM>;
v[386] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESMALLTABLESFILESIZE>;
v[387] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEMA_EVOLUTION>;
v[388] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_FORCE_POSITIONAL_SCHEMA_EVOLUTION>;
v[389] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRANSACTIONAL_TABLE_SCAN>;
v[390] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY>;
v[391] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars FILTER_DELETE_EVENTS>;
v[392] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLERANDOMNUM>;
v[393] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODE>;
v[394] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXIMTESTMODE>;
v[395] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEPREFIX>;
v[396] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODESAMPLEFREQ>;
v[397] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODENOSAMPLE>;
v[398] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEDUMMYSTATAGGR>;
v[399] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEDUMMYSTATPUB>;
v[400] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTCURRENTTIMESTAMP>;
v[401] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEROLLBACKTXN>;
v[402] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILCOMPACTION>;
v[403] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILLOADDYNAMICPARTITION>;
v[404] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILHEARTBEATER>;
v[405] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TESTMODE_BUCKET_CODEC_VERSION>;
v[406] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXTEND_BUCKET_ID_RANGE>;
v[407] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEACIDKEYIDXSKIP>;
v[408] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILES>;
v[409] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPREDFILES>;
v[410] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGETEZFILES>;
v[411] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILESSIZE>;
v[412] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILESAVGSIZE>;
v[413] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGERCFILEBLOCKLEVEL>;
v[414] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEORCFILESTRIPELEVEL>;
v[415] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CODEC_POOL>;
v[416] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEEXPLICITRCFILEHEADER>;
v[417] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSERCFILESYNCCACHE>;
v[418] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_RECORD_INTERVAL>;
v[419] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_COLUMN_NUMBER_CONF>;
v[420] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_TOLERATE_CORRUPTIONS>;
v[421] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_RECORD_BUFFER_SIZE>;
v[422] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars PARQUET_MEMORY_POOL_RATIO>;
v[423] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION>;
v[424] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_DATE_PROLEPTIC_GREGORIAN>;
v[425] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_DATE_PROLEPTIC_GREGORIAN_DEFAULT>;
v[426] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_LEGACY_CONVERSION_ENABLED>;
v[427] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_WRITE_LEGACY_CONVERSION_ENABLED>;
v[428] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_INFER_BINARY_AS>;
v[429] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION>;
v[430] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_PROLEPTIC_GREGORIAN>;
v[431] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_PROLEPTIC_GREGORIAN_DEFAULT>;
v[432] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_LEGACY_CONVERSION_ENABLED>;
v[433] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_WRITE_LEGACY_CONVERSION_ENABLED>;
v[434] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS>;
v[435] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_WRITE_INT64_TIMESTAMP>;
v[436] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_TIME_UNIT>;
v[437] = v;
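v[436] and v[437] control whether Hive writes Parquet timestamps as INT64 and at what precision. A hedged sketch, assuming "micros" is among the accepted unit names for HIVE_PARQUET_TIMESTAMP_TIME_UNIT:

import org.apache.hadoop.hive.conf.HiveConf;

public class ParquetTimestampSetup {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(HiveConf.ConfVars.HIVE_PARQUET_WRITE_INT64_TIMESTAMP, true);
        conf.setVar(HiveConf.ConfVars.HIVE_PARQUET_TIMESTAMP_TIME_UNIT, "micros"); // assumed valid unit name
        System.out.println("int64 timestamps: "
                + conf.getBoolVar(HiveConf.ConfVars.HIVE_PARQUET_WRITE_INT64_TIMESTAMP)
                + " unit: " + conf.getVar(HiveConf.ConfVars.HIVE_PARQUET_TIMESTAMP_TIME_UNIT));
    }
}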
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_BASE_DELTA_RATIO>;
v[438] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED>;
v[439] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_SPLIT_STRATEGY>;
v[440] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_BLOB_STORAGE_SPLIT_SIZE>;
v[441] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_WRITER_LLAP_MEMORY_MANAGER_ENABLED>;
v[442] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STREAMING_AUTO_FLUSH_ENABLED>;
v[443] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HEAP_MEMORY_MONITOR_USAGE_THRESHOLD>;
v[444] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STREAMING_AUTO_FLUSH_CHECK_INTERVAL_SIZE>;
v[445] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLASSLOADER_SHADE_PREFIX>;
v[446] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_MS_FOOTER_CACHE_ENABLED>;
v[447] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_MS_FOOTER_CACHE_PPD>;
v[448] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS>;
v[449] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS>;
v[450] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS>;
v[451] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS>;
v[452] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE>;
v[453] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS>;
v[454] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CACHE_USE_SOFT_REFERENCES>;
v[455] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IO_SARG_CACHE_MAX_WEIGHT_MB>;
v[456] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL>;
v[457] = v;
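// Join optimizations follow: skew join, map-join conversion, limit pushdown,
// and hash-table sizing.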
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOIN>;
v[458] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDYNAMICPARTITIONHASHJOIN>;
v[459] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOIN>;
v[460] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINNOCONDITIONALTASK>;
v[461] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONVERT_ANTI_JOIN>;
v[462] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD>;
v[463] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINUSENONSTAGED>;
v[464] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINKEY>;
v[465] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINMAPJOINNUMMAPTASK>;
v[466] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINMAPJOINMINSPLIT>;
v[467] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESENDHEARTBEAT>;
v[468] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITMAXROWSIZE>;
v[469] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTLIMITFILE>;
v[470] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTENABLE>;
v[471] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTMAXFETCH>;
v[472] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITPUSHDOWNMEMORYUSAGE>;
v[473] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINMAXENTRIESHASHTABLE>;
v[474] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars XPRODSMALLTABLEROWSTHRESHOLD>;
v[475] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINMAXSHUFFLESIZE>;
v[476] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEKEYCOUNTADJUSTMENT>;
v[477] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLETHRESHOLD>;
v[478] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLELOADFACTOR>;
v[479] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE>;
v[480] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEMAXMEMORYUSAGE>;
v[481] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLESCALE>;
v[482] = v;
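// Input formats, Tez execution settings, and syslog input-format pruning follow.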
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEINPUTFORMAT>;
v[483] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZINPUTFORMAT>;
v[484] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZCONTAINERSIZE>;
v[485] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZCPUVCORES>;
v[486] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZJAVAOPTS>;
v[487] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZLOGLEVEL>;
v[488] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZHS2USERACCESS>;
v[489] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYNAME>;
v[490] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZJOBNAME>;
v[491] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SYSLOG_INPUT_FORMAT_FILE_PRUNING>;
v[492] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SYSLOG_INPUT_FORMAT_FILE_TIME_SLICE>;
v[493] = v;
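// Bucketing/sorting enforcement and auto sort-merge bucket map join follow.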
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEBUCKETINGSORTING>;
v[494] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPARTITIONER>;
v[495] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEENFORCESORTMERGEBUCKETMAPJOIN>;
v[496] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEENFORCEBUCKETMAPJOIN>;
v[497] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SORT_WHEN_BUCKETING>;
v[498] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENFORCE_NOT_NULL_CONSTRAINT>;
v[499] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN>;
v[500] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_REDUCE>;
v[501] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR>;
v[502] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN>;
v[503] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTOPERATORTRUST>;
v[504] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEROWOFFSET>;
v[505] = v;
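// Predicate pushdown and logical optimizer rewrites follow.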
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTINDEXFILTER>;
v[506] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD>;
v[507] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD_WINDOWING>;
v[508] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPDRECOGNIZETRANSITIVITY>;
v[509] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPD_RECOGNIZE_COLUMN_EQUALITIES>;
v[510] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPDREMOVEDUPLICATEFILTERS>;
v[511] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN>;
v[512] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPOINTLOOKUPOPTIMIZER>;
v[513] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPOINTLOOKUPOPTIMIZERMIN>;
v[514] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPT_TRANSFORM_IN_MAXNODES>;
v[515] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTDISTINCTOPTIMIZER>;
v[516] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPARTITIONCOLUMNSEPARATOR>;
v[517] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTCONSTANTPROPAGATION>;
v[518] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEIDENTITYPROJECTREMOVER>;
v[519] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMETADATAONLYQUERIES>;
v[520] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVENULLSCANOPTIMIZE>;
v[521] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD_STORAGE>;
v[522] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTGROUPBY>;
v[523] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTBUCKETMAPJOIN>;
v[524] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTSORTMERGEBUCKETMAPJOIN>;
v[525] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTREDUCEDEDUPLICATION>;
v[526] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTREDUCEDEDUPLICATIONMINREDUCER>;
v[527] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTJOINREDUCEDEDUPLICATION>;
v[528] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD>;
v[529] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGFORORDERBY>;
v[530] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGNUMBERFORORDERBY>;
v[531] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGPERCENTFORORDERBY>;
v[532] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REMOVE_ORDERBY_IN_SUBQUERY>;
v[533] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEDISTINCTREWRITE>;
v[534] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_UNION_REMOVE>;
v[535] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTCORRELATION>;
v[536] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE>;
v[537] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE>;
v[538] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES>;
v[539] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_CONSTRAINTS_JOIN>;
v[540] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SORT_PREDS_WITH_STATS>;
v[541] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_REDUCE_WITH_STATS>;
v[542] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME>;
v[543] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT>;
v[544] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TOPNKEY>;
v[545] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAX_TOPN_ALLOWED>;
v[546] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_EFFICIENCY_THRESHOLD>;
v[547] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_EFFICIENCY_CHECK_BATCHES>;
v[548] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_MAX_NUMBER_OF_PARTITIONS>;
v[549] = v;
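// Shared-work optimization and sketch-based BI rewrites follow.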
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_OPTIMIZATION>;
v[550] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION>;
v[551] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_SEMIJOIN_OPTIMIZATION>;
v[552] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_MERGE_TS_SCHEMA>;
v[553] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_REUSE_MAPJOIN_CACHE>;
v[554] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DPPUNION_OPTIMIZATION>;
v[555] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DPPUNION_MERGE_EVENTOPS>;
v[556] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DOWNSTREAM_MERGE>;
v[557] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_PARALLEL_EDGE_SUPPORT>;
v[558] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REMOVE_SQ_COUNT_CHECK>;
v[559] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE>;
v[560] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE_LIST>;
v[561] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SCAN_PROBEDECODE>;
v[562] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_HMS_QUERY_CACHE_ENABLED>;
v[563] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_VIEW_CACHE_ENABLED>;
v[564] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTE_MATERIALIZE_THRESHOLD>;
v[565] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTE_MATERIALIZE_FULL_AGGREGATE_ONLY>;
v[566] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_ENABLED>;
v[567] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_COUNTDISTINCT_ENABLED>;
v[568] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_COUNT_DISTINCT_SKETCH>;
v[569] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_PERCENTILE_DISC_ENABLED>;
v[570] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_PERCENTILE_DISC_SKETCH>;
v[571] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_CUME_DIST_ENABLED>;
v[572] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_CUME_DIST_SKETCH>;
v[573] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_NTILE_ENABLED>;
v[574] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_NTILE_SKETCH>;
v[575] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_RANK_ENABLED>;
v[576] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_RANK_SKETCH>;
v[577] = v;
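// Statistics settings follow: estimation, autogather, NDV, and cost-model factors.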
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_ESTIMATE_STATS>;
v[578] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ESTIMATE_PERC>;
v[579] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_JOIN_NDV_READJUSTMENT>;
v[580] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NUM_NULLS_ESTIMATE_PERC>;
v[581] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSAUTOGATHER>;
v[582] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSCOLAUTOGATHER>;
v[583] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSDBCLASS>;
v[584] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DEFAULT_PUBLISHER>;
v[585] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DEFAULT_AGGREGATOR>;
v[586] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIENT_STATS_COUNTERS>;
v[587] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_RELIABLE>;
v[588] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_PART_LEVEL_STATS>;
v[589] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_GATHER_NUM_THREADS>;
v[590] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_TABLEKEYS>;
v[591] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_SCANCOLS>;
v[592] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ALGO>;
v[593] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_FETCH_BITVECTOR>;
v[594] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ERROR>;
v[595] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_ESTIMATORS_ENABLE>;
v[596] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_STATS_NDV_TUNER>;
v[597] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION>;
v[598] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_KEY_PREFIX>;
v[599] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAX_VARIABLE_LENGTH>;
v[600] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_LIST_NUM_ENTRIES>;
v[601] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAP_NUM_ENTRIES>;
v[602] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_FETCH_COLUMN_STATS>;
v[603] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_JOIN_FACTOR>;
v[604] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_CORRELATED_MULTI_KEY_JOINS>;
v[605] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_RANGE_SELECTIVITY_UNIFORM_DISTRIBUTION>;
v[606] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DESERIALIZATION_FACTOR>;
v[607] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_IN_CLAUSE_FACTOR>;
v[608] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_IN_MIN_RATIO>;
v[609] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_UDTF_FACTOR>;
v[610] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_USE_BITVECTORS>;
v[611] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAX_NUM_STATS>;
v[612] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_THRIFT_CLIENT_MAX_MESSAGE_SIZE>;
v[613] = v;
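// Concurrency, lock management, ZooKeeper, and ACID transaction settings follow.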
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SUPPORT_CONCURRENCY>;
v[614] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_MANAGER>;
v[615] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_NUMRETRIES>;
v[616] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_UNLOCK_NUMRETRIES>;
v[617] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_SLEEP_BETWEEN_RETRIES>;
v[618] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_MAPRED_ONLY>;
v[619] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_QUERY_STRING_MAX_LENGTH>;
v[620] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MM_ALLOW_ORIGINALS>;
v[621] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_FILE_MOVE_MODE>;
v[622] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_USE_KERBEROS>;
v[623] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_QUORUM>;
v[624] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CLIENT_PORT>;
v[625] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SESSION_TIMEOUT>;
v[626] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_TIMEOUT>;
v[627] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_NAMESPACE>;
v[628] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES>;
v[629] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES>;
v[630] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME>;
v[631] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_ENABLE>;
v[632] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_KEYSTORE_LOCATION>;
v[633] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_KEYSTORE_PASSWORD>;
v[634] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_TRUSTSTORE_LOCATION>;
v[635] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD>;
v[636] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_KILLQUERY_ENABLE>;
v[637] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_KILLQUERY_NAMESPACE>;
v[638] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MANAGER>;
v[639] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_EXT_LOCKING_ENABLED>;
v[640] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_STRICT_LOCKING_MODE>;
v[641] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_NONACID_READ_LOCKS>;
v[642] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_READ_LOCKS>;
v[643] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCKS_PARTITION_THRESHOLD>;
v[644] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_OVERWRITE_X_LOCK>;
v[645] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_MERGE_INSERT_X_LOCK>;
v[646] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_WRITE_X_LOCK>;
v[647] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_STATS_ENABLED>;
v[648] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_DIR_CACHE_DURATION>;
v[649] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_WRITE_ACID_VERSION_FILE>;
v[650] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_READONLY_ENABLED>;
v[651] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_LOCKLESS_READS_ENABLED>;
v[652] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_CREATE_TABLE_USE_SUFFIX>;
v[653] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_TRUNCATE_USE_BASE>;
v[654] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_DROP_PARTITION_USE_BASE>;
v[655] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_RENAME_PARTITION_MAKE_COPY>;
v[656] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_MAX_CACHE_SIZE>;
v[657] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_REPORTING_INTERVAL>;
v[658] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_DELTA_NUM_THRESHOLD>;
v[659] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_OBSOLETE_DELTA_NUM_THRESHOLD>;
v[660] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_DELTA_PCT_THRESHOLD>;
v[661] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_TIMEOUT>;
v[662] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE>;
v[663] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT>;
v[664] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_OPERATIONAL_PROPERTIES>;
v[665] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAX_OPEN_TXNS>;
v[666] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COUNT_OPEN_TXNS_INTERVAL>;
v[667] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MAX_OPEN_BATCH>;
v[668] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_RETRYABLE_SQLEX_REGEX>;
v[669] = v;
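// Compactor scheduling, thresholds, and cleaner settings follow.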
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_INITIATOR_ON>;
v[670] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WORKER_THREADS>;
v[671] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WORKER_TIMEOUT>;
v[672] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CHECK_INTERVAL>;
v[673] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_INITIATOR_DURATION_UPDATE_INTERVAL>;
v[674] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_DURATION_UPDATE_INTERVAL>;
v[675] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_REQUEST_QUEUE>;
v[676] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELTA_NUM_THRESHOLD>;
v[677] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELTA_PCT_THRESHOLD>;
v[678] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_MAX_NUM_DELTA>;
v[679] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD>;
v[680] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ABORTEDTXN_TIME_THRESHOLD>;
v[681] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ACTIVE_DELTA_DIR_THRESHOLD>;
v[682] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_OBSOLETE_DELTA_DIR_THRESHOLD>;
v[683] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_SMALL_DELTA_DIR_THRESHOLD>;
v[684] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ACID_METRICS_LOGGER_FREQUENCY>;
v[685] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WAIT_TIMEOUT>;
v[686] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MR_COMPACTOR_GATHER_STATS>;
v[687] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_GATHER_STATS>;
v[688] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_INITIATOR_FAILED_THRESHOLD>;
v[689] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_RUN_INTERVAL>;
v[690] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED>;
v[691] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_RETENTION_TIME>;
v[692] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_THREADS_NUM>;
v[693] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_JOB_QUEUE>;
v[694] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TRANSACTIONAL_CONCATENATE_NOBLOCK>;
v[695] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CONCATENATE_EXTERNAL_TABLE>;
v[696] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_COMPACT_MM>;
v[697] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_CRUD_QUERY_BASED>;
v[698] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SPLIT_GROUPING_MODE>;
v[699] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_SUCCEEDED>;
v[700] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_FAILED>;
v[701] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_ATTEMPTED>;
v[702] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_REAPER_INTERVAL>;
v[703] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TIMEDOUT_TXN_REAPER_INTERVAL>;
v[704] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars WRITE_SET_REAPER_INTERVAL>;
v[705] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MERGE_CARDINALITY_VIOLATION_CHECK>;
v[706] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SPLIT_UPDATE>;
v[707] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MERGE_SPLIT_UPDATE>;
v[708] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars OPTIMIZE_ACID_META_COLUMNS>;
v[709] = v;
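// Arrow allocators and the Druid, HBase, and Kudu storage-handler settings follow.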
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_ROOT_ALLOCATOR_LIMIT>;
v[710] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_BATCH_ALLOCATOR_LIMIT>;
v[711] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_BATCH_SIZE>;
v[712] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_INDEXING_GRANULARITY>;
v[713] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_PARTITION_SIZE>;
v[714] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_ROW_IN_MEMORY>;
v[715] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BROKER_DEFAULT_ADDRESS>;
v[716] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS>;
v[717] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS>;
v[718] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_SELECT_THRESHOLD>;
v[719] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_NUM_HTTP_CONNECTION>;
v[720] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_HTTP_READ_TIMEOUT>;
v[721] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_SLEEP_TIME>;
v[722] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BASE_PERSIST_DIRECTORY>;
v[723] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_ROLLUP>;
v[724] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY>;
v[725] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_BASE>;
v[726] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_TYPE>;
v[727] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_USERNAME>;
v[728] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_PASSWORD>;
v[729] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_URI>;
v[730] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_WORKING_DIR>;
v[731] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_TRIES>;
v[732] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_PASSIVE_WAIT_TIME>;
v[733] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BITMAP_FACTORY_TYPE>;
v[734] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_KERBEROS_ENABLE>;
v[735] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_WAL_ENABLED>;
v[736] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_GENERATE_HFILES>;
v[737] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_SNAPSHOT_NAME>;
v[738] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_SNAPSHOT_RESTORE_DIR>;
v[739] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_HBASE_URLENCODE_AUTHORIZATION_URI>;
v[740] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_KUDU_MASTER_ADDRESSES_DEFAULT>;
v[741] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEARCHIVEENABLED>;
v[742] = v;
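// Fetch-task conversion, expression evaluation, and variable substitution follow.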
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCONVERSION>;
v[743] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCACHING>;
v[744] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCONVERSIONTHRESHOLD>;
v[745] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKAGGR>;
v[746] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEMETADATAQUERIES>;
v[747] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHOUTPUTSERDE>;
v[748] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXPREVALUATIONCACHE>;
v[749] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEVARIABLESUBSTITUTE>;
v[750] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEVARIABLESUBSTITUTEDEPTH>;
v[751] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONFVALIDATION>;
v[752] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SEMANTIC_ANALYZER_HOOK>;
v[753] = v;
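// Authorization and authentication settings follow.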
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE>;
v[754] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_ENABLED>;
v[755] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_KERBEROS_USE_SHORTNAME>;
v[756] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_MANAGER>;
v[757] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHENTICATOR_MANAGER>;
v[758] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHORIZATION_MANAGER>;
v[759] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHORIZATION_AUTH_READS>;
v[760] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHENTICATOR_MANAGER>;
v[761] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_USER_GRANTS>;
v[762] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS>;
v[763] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS>;
v[764] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS>;
v[765] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TASK_FACTORY>;
v[766] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLES_ON_STORAGEHANDLERS>;
v[767] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST>;
v[768] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND>;
v[769] = v;
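// CLI output, strict managed tables, and export/import (EXIM) restrictions follow.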
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_PRINT_HEADER>;
v[770] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_PRINT_ESCAPE_CRLF>;
v[771] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_TEZ_SESSION_ASYNC>;
v[772] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISABLE_UNSAFE_EXTERNALTABLE_OPERATIONS>;
v[773] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_MANAGED_TABLES>;
v[774] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXTERNALTABLE_PURGE_DEFAULT>;
v[775] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ERROR_ON_EMPTY_PARTITION>;
v[776] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXIM_URI_SCHEME_WL>;
v[777] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES>;
v[778] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REPL_TASK_FACTORY>;
v[779] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_FILTER_TRANSACTIONS>;
v[780] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS>;
v[781] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REWORK_MAPREDWORK>;
v[782] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IO_EXCEPTION_HANDLERS>;
v[783] = v;
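// Log4j configuration and EXPLAIN output logging follow.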
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG4J_FILE>;
v[784] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_LOG4J_FILE>;
v[785] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_LOG_ENABLED>;
v[786] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT>;
v[787] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT_TO_CONSOLE>;
v[788] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT_INCLUDE_EXTENDED>;
v[789] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXPLAIN_USER>;
v[790] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL>;
v[791] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME>;
v[792] = v;
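// Metrics reporting and the performance logger follow.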
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_CLASS>;
v[793] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CODAHALE_METRICS_REPORTER_CLASSES>;
v[794] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_REPORTER>;
v[795] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_JSON_FILE_LOCATION>;
v[796] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_JSON_FILE_INTERVAL>;
v[797] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_HADOOP2_INTERVAL>;
v[798] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_HADOOP2_COMPONENT_NAME>;
v[799] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PERF_LOGGER>;
v[800] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_START_CLEANUP_SCRATCHDIR>;
v[801] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCRATCH_DIR_LOCK>;
v[802] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INSERT_INTO_MULTILEVEL_DIRS>;
v[803] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTAS_EXTERNAL_TABLES>;
v[804] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INSERT_INTO_EXTERNAL_TABLES>;
v[805] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEMPORARY_TABLE_STORAGE>;
v[806] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_LIFETIME_HOOKS>;
v[807] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRIVER_RUN_HOOKS>;
v[808] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DDL_OUTPUT_FORMAT>;
v[809] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENTITY_SEPARATOR>;
v[810] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CAPTURE_TRANSFORM_ENTITY>;
v[811] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY>;
v[812] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LINEAGE_INFO>;
v[813] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SSL_PROTOCOL_BLACKLIST>;
v[814] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PRIVILEGE_SYNCHRONIZER>;
v[815] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL>;
v[816] = v;
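// HiveServer2 settings follow: scratch-dir cleanup, service discovery, web UI,
// and workload management.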
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR>;
v[817] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL>;
v[818] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS>;
v[819] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MAX_START_ATTEMPTS>;
v[820] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY>;
v[821] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ZOOKEEPER_NAMESPACE>;
v[822] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS>;
v[823] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_PROXY_TRUSTHEADER>;
v[824] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION>;
v[825] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRANSPORT_MODE>;
v[826] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_BIND_HOST>;
v[827] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_COMPILATION>;
v[828] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT>;
v[829] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_COMPILE_LOCK_TIMEOUT>;
v[830] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_OPS_IN_SESSION>;
v[831] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL>;
v[832] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_REFRESH>;
v[833] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_BIND_HOST>;
v[834] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_PORT>;
v[835] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_THREADS>;
v[836] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_SSL>;
v[837] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH>;
v[838] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD>;
v[839] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_TYPE>;
v[840] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_EXCLUDE_CIPHERSUITES>;
v[841] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYMANAGERFACTORY_ALGORITHM>;
v[842] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_SPNEGO>;
v[843] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB>;
v[844] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL>;
v[845] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES>;
v[846] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_PAM>;
v[847] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_EXPLAIN_OUTPUT>;
v[848] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SHOW_GRAPH>;
v[849] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_GRAPH_SIZE>;
v[850] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SHOW_STATS>;
v[851] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_ENABLE_CORS>;
v[852] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_ORIGINS>;
v[853] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_METHODS>;
v[854] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_HEADERS>;
v[855] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_XFRAME_ENABLED>;
v[856] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_XFRAME_VALUE>;
v[857] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SHOW_OPERATION_DRILLDOWN_LINK>;
v[858] = v;
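// Active/passive HA registry plus workload-management (WM) settings: namespace,
// worker threads, pool metrics, and delayed-move timeouts.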
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE>;
v[859] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ACTIVE_PASSIVE_HA_REGISTRY_NAMESPACE>;
v[860] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE>;
v[861] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_NAMESPACE>;
v[862] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_WORKER_THREADS>;
v[863] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC>;
v[864] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_POOL_METRICS>;
v[865] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_WM_AM_REGISTRY_TIMEOUT>;
v[866] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE>;
v[867] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE_TIMEOUT>;
v[868] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE_VALIDATOR_INTERVAL>;
v[869] = v;
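// Tez session pool: default queues, sessions per queue, session lifetime and
// jitter, and restricted/custom-queue session configs.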
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_DEFAULT_QUEUES>;
v[870] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE>;
v[871] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS>;
v[872] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_QUEUE_ACCESS_CHECK>;
v[873] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_LIFETIME>;
v[874] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER>;
v[875] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS>;
v[876] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_RESTRICTED_CONFIGS>;
v[877] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED>;
v[878] = v;
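// Operation logging: per-operation log location/level plus historic-log
// retention and fetch limits.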
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_ENABLED>;
v[879] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION>;
v[880] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_LEVEL>;
v[881] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_OPERATION_LOG_CLEANUP_DELAY>;
v[882] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_OPERATION_LOG_PURGEPOLICY_TIMETOLIVE>;
v[883] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_ENABLED>;
v[884] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_CHECK_INTERVAL>;
v[885] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_FETCH_MAXBYTES>;
v[886] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER>;
v[887] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_IPADDRESS>;
v[888] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER_IPADDRESS>;
v[889] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_METRICS_ENABLED>;
v[890] = v;
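// hive.server2.thrift.http.*: HTTP transport (port, path, idle/keepalive times,
// header sizes, compression, cookie-based auth).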
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_PORT>;
v[891] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_PATH>;
v[892] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE>;
v[893] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME>;
v[894] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME>;
v[895] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE>;
v[896] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE>;
v[897] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COMPRESSION_ENABLED>;
v[898] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED>;
v[899] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE>;
v[900] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN>;
v[901] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH>;
v[902] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE>;
v[903] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY>;
v[904] = v;
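// Binary Thrift transport: port, SASL QoP, worker-pool sizing, and login
// backoff/timeout. For orientation, a minimal hypothetical usage sketch in
// plain Java (getIntVar is the real HiveConf accessor; the snippet itself is
// an annotation, not part of the decompiled output):
//   HiveConf conf = new HiveConf();
//   int port = conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT);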
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_PORT>;
v[905] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_SASL_QOP>;
v[906] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS>;
v[907] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS>;
v[908] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH>;
v[909] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT>;
v[910] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME>;
v[911] = v;
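// Async execution pool: thread count, shutdown timeout, wait-queue size,
// keepalive, and asynchronous compilation.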
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_THREADS>;
v[912] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT>;
v[913] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE>;
v[914] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME>;
v[915] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_ASYNC_COMPILE>;
v[916] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LONG_POLLING_TIMEOUT>;
v[917] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_IMPL_CLASSNAME>;
v[918] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME>;
v[919] = v;
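// Authentication: HIVE_SERVER2_AUTHENTICATION selects the mechanism (e.g.
// NONE/LDAP/KERBEROS/PAM/CUSTOM, with JWT and SAML constants below); the
// entries that follow are per-mechanism options.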
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION>;
v[920] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_DOMAIN>;
v[921] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_DOMAIN_USE_XFF_HEADER>;
v[922] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ALLOW_USER_SUBSTITUTION>;
v[923] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_KERBEROS_KEYTAB>;
v[924] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_KERBEROS_PRINCIPAL>;
v[925] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLIENT_KERBEROS_PRINCIPAL>;
v[926] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SPNEGO_KEYTAB>;
v[927] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SPNEGO_PRINCIPAL>;
v[928] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_URL>;
v[929] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BASEDN>;
v[930] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_DOMAIN>;
v[931] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN>;
v[932] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER>;
v[933] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN>;
v[934] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERFILTER>;
v[935] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GUIDKEY>;
v[936] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY>;
v[937] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY>;
v[938] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY>;
v[939] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY>;
v[940] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BIND_USER>;
v[941] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BIND_PASSWORD>;
v[942] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS>;
v[943] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PAM_SERVICES>;
v[944] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_URL>;
v[945] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_SKIP_SSL_CERT>;
v[946] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_KEYSTORE_PATH>;
v[947] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_KEYSTORE_PASSWORD>;
v[948] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_PRIVATE_KEY_PASSWORD>;
v[949] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_IDP_METADATA>;
v[950] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_SP_ID>;
v[951] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_FORCE_AUTH>;
v[952] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_AUTHENTICATION_LIFETIME>;
v[953] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_BLACKLISTED_SIGNATURE_ALGORITHMS>;
v[954] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_ACS_INDEX>;
v[955] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_CALLBACK_URL>;
v[956] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_WANT_ASSERTIONS_SIGNED>;
v[957] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_SIGN_REQUESTS>;
v[958] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_CALLBACK_TOKEN_TTL>;
v[959] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_GROUP_ATTRIBUTE_NAME>;
v[960] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_GROUP_FILTER>;
v[961] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ENABLE_DOAS>;
v[962] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SERVICE_USERS>;
v[963] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISTCP_DOAS_USER>;
v[964] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TABLE_TYPE_MAPPING>;
v[965] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SESSION_HOOK>;
v[966] = v;
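// TLS for HiveServer2: keystore path/password/type plus cipher-suite
// include/exclude lists for the HTTP and binary transports.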
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_USE_SSL>;
v[967] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_PATH>;
v[968] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_PASSWORD>;
v[969] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_TYPE>;
v[970] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYMANAGERFACTORY_ALGORITHM>;
v[971] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_HTTP_EXCLUDE_CIPHERSUITES>;
v[972] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_BINARY_INCLUDE_CIPHERSUITES>;
v[973] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_BUILTIN_UDF_WHITELIST>;
v[974] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_BUILTIN_UDF_BLACKLIST>;
v[975] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ALLOW_UDF_LOAD_ON_DEMAND>;
v[976] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SESSION_CHECK_INTERVAL>;
v[977] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT>;
v[978] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_SESSION_TIMEOUT>;
v[979] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_OPERATION_TIMEOUT>;
v[980] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION>;
v[981] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT>;
v[982] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT>;
v[983] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS>;
v[984] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_USER>;
v[985] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_PASSWORD>;
v[986] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS>;
v[987] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE>;
v[988] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE>;
v[989] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_XSRF_FILTER_ENABLED>;
v[990] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_COMMAND_WHITELIST>;
v[991] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH>;
v[992] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_GRACEFUL_STOP_TIMEOUT>;
v[993] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MOVE_FILES_THREAD_COUNT>;
v[994] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT>;
v[995] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS>;
v[996] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES>;
v[997] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HDFS_ENCRYPTION_SHIM_CACHE_ON>;
v[998] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INFER_BUCKET_SORT>;
v[999] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO>;
v[1000] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTLISTBUCKETING>;
v[1001] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SERVER_READ_SOCKET_TIMEOUT>;
v[1002] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SERVER_TCP_KEEP_ALIVE>;
v[1003] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DECODE_PARTITION_NAME>;
v[1004] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXECUTION_ENGINE>;
v[1005] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXECUTION_MODE>;
v[1006] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JAR_DIRECTORY>;
v[1007] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_USER_INSTALL_DIR>;
v[1008] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MASKING_ALGO>;
v[1009] = v;
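// Vectorized execution: master switch plus per-operator controls (reduce-side,
// group-by, native map-join, PTF) and serde/input-format settings.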
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ENABLED>;
v[1010] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCE_ENABLED>;
v[1011] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED>;
v[1012] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED>;
v[1013] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED>;
v[1014] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED>;
v[1015] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD>;
v[1016] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED>;
v[1017] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL>;
v[1018] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_MAXENTRIES>;
v[1019] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT>;
v[1020] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED>;
v[1021] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT>;
v[1022] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_VECTORIZED_INPUT_FILE_FORMAT_EXCLUDES>;
v[1023] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE>;
v[1024] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_ROW_DESERIALIZE>;
v[1025] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES>;
v[1026] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTOR_ADAPTOR_USAGE_MODE>;
v[1027] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE>;
v[1028] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_PTF_ENABLED>;
v[1029] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_PTF_MAX_MEMORY_BUFFERING_BATCH_COUNT>;
v[1030] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_TESTING_REDUCER_BATCH_SIZE>;
v[1031] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_TESTING_REUSE_SCRATCH_COLUMNS>;
v[1032] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_COMPLEX_TYPES_ENABLED>;
v[1033] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_COMPLEX_TYPES_ENABLED>;
v[1034] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED>;
v[1035] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_CHECKED_EXPRESSIONS>;
v[1036] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_ADAPTOR_SUPPRESS_EVALUATE_EXCEPTIONS>;
v[1037] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED>;
v[1038] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_IF_EXPR_MODE>;
v[1039] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZATION_ENABLED_OVERRIDE>;
v[1040] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZATION_SUPPRESS_EXPLAIN_EXECUTION_MODE>;
v[1041] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZER_SUPPRESS_FATAL_EXCEPTIONS>;
v[1042] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_FILESINK_ARROW_NATIVE_ENABLED>;
v[1043] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TYPE_CHECK_ON_INSERT>;
v[1044] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HADOOP_CLASSPATH>;
v[1045] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RPC_QUERY_PLAN>;
v[1046] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PLAN_MAPWORK_SERIALIZATION_SKIP_PROPERTIES>;
v[1047] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AM_SPLIT_GENERATION>;
v[1048] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SPLITS_AVAILABLE_SLOTS_CALCULATOR_CLASS>;
v[1049] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_GENERATE_CONSISTENT_SPLITS>;
v[1050] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PREWARM_ENABLED>;
v[1051] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PREWARM_NUM_CONTAINERS>;
v[1052] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTAGEIDREARRANGE>;
v[1053] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES>;
v[1054] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEGOOGLEREGEXENGINE>;
v[1055] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTERGROUP>;
v[1056] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUOTEDID_SUPPORT>;
v[1057] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES>;
v[1058] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CREATE_TABLE_AS_EXTERNAL>;
v[1059] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CREATE_TABLES_AS_ACID>;
v[1060] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CREATE_TABLES_AS_INSERT_ONLY>;
v[1061] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_DIRECT_INSERT_ENABLED>;
v[1062] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_CTAS_X_LOCK>;
v[1063] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars USERS_IN_ADMIN_ROLE>;
v[1064] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPAT>;
v[1065] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ>;
v[1066] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_BMJ_USE_SUBCACHE>;
v[1067] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CHECK_CROSS_PRODUCT>;
v[1068] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL>;
v[1069] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS>;
v[1070] = v;
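// Tez runtime optimizations: auto reducer parallelism, bucket/partition
// pruning, and dynamic semijoin reduction with bloom-filter sizing
// (min/max entries, factor, merge threads).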
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_AUTO_REDUCER_PARALLELISM>;
v[1071] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR>;
v[1072] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAPREDUCE_OUTPUT_COMMITTER>;
v[1073] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAX_PARTITION_FACTOR>;
v[1074] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MIN_PARTITION_FACTOR>;
v[1075] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_OPTIMIZE_BUCKET_PRUNING>;
v[1076] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT>;
v[1077] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING>;
v[1078] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED>;
v[1079] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE>;
v[1080] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE>;
v[1081] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION>;
v[1082] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MIN_BLOOM_FILTER_ENTRIES>;
v[1083] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAX_BLOOM_FILTER_ENTRIES>;
v[1084] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BLOOM_FILTER_FACTOR>;
v[1085] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BLOOM_FILTER_MERGE_THREADS>;
v[1086] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION>;
v[1087] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD>;
v[1088] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_MULTICOLUMN>;
v[1089] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN>;
v[1090] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_DPP_FACTOR>;
v[1091] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SMB_NUMBER_WAVES>;
v[1092] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_EXEC_SUMMARY>;
v[1093] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SESSION_EVENTS_SUMMARY>;
v[1094] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_EXEC_INPLACE_PROGRESS>;
v[1095] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_INPLACE_PROGRESS>;
v[1096] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DAG_STATUS_CHECK_INTERVAL>;
v[1097] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_CONTAINER_MAX_JAVA_HEAP_FRACTION>;
v[1098] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MIN>;
v[1099] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MAX>;
v[1100] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION>;
v[1101] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED>;
v[1102] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB>;
v[1103] = v;
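// LLAP I/O layer: cache enablement and memory mode, buddy allocator sizing
// (min/max alloc, arena count, defrag headroom), LRFU eviction tuning, and
// the encode pipeline.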
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENABLED>;
v[1104] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CACHE_ONLY>;
v[1105] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ROW_WRAPPER_ENABLED>;
v[1106] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ACID_ENABLED>;
v[1107] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_TRACE_SIZE>;
v[1108] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_TRACE_ALWAYS_DUMP>;
v[1109] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_NONVECTOR_WRAPPER_ENABLED>;
v[1110] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_MEMORY_MODE>;
v[1111] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MIN_ALLOC>;
v[1112] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAX_ALLOC>;
v[1113] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_ARENA_COUNT>;
v[1114] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_MEMORY_MAX_SIZE>;
v[1115] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DIRECT>;
v[1116] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_PREALLOCATE>;
v[1117] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAPPED>;
v[1118] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAPPED_PATH>;
v[1119] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DISCARD_METHOD>;
v[1120] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DEFRAG_HEADROOM>;
v[1121] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAX_FORCE_EVICTED>;
v[1122] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TRACK_CACHE_USAGE>;
v[1123] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_USE_LRFU>;
v[1124] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_LAMBDA>;
v[1125] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_HOTBUFFERS_PERCENTAGE>;
v[1126] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_BP_WRAPPER_SIZE>;
v[1127] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_ALLOW_SYNTHETIC_FILEID>;
v[1128] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_DEFAULT_FS_FILE_ID>;
v[1129] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_ENABLE_ORC_GAP_CACHE>;
v[1130] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_HYDRATION_STRATEGY_CLASS>;
v[1131] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_HYDRATION_SAVE_DIR>;
v[1132] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_USE_FILEID_PATH>;
v[1133] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_ENABLED>;
v[1134] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_FORMATS>;
v[1135] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_ALLOC_SIZE>;
v[1136] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_VECTOR_SERDE_ENABLED>;
v[1137] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_VECTOR_SERDE_ASYNC_ENABLED>;
v[1138] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_SLICE_ROW_COUNT>;
v[1139] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_SLICE_LRR>;
v[1140] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ORC_ENABLE_TIME_COUNTERS>;
v[1141] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_VRB_QUEUE_LIMIT_MAX>;
v[1142] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_VRB_QUEUE_LIMIT_MIN>;
v[1143] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CVB_BUFFERED_SIZE>;
v[1144] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_ENABLED>;
v[1145] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_SWEEP_INTERVAL>;
v[1146] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_INSTANT_DEALLOC>;
v[1147] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CACHE_DELETEDELTAS>;
v[1148] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PATH_CACHE_SIZE>;
v[1149] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_SHARE_OBJECT_POOLS>;
v[1150] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ALLOW_UBER>;
v[1151] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_TREE>;
v[1152] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_VECTORIZED>;
v[1153] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_STATS>;
v[1154] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_MAX_INPUT>;
v[1155] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_MAX_OUTPUT>;
v[1156] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SKIP_COMPILE_UDF_CHECK>;
v[1157] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOW_PERMANENT_FNS>;
v[1158] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXECUTION_MODE>;
v[1159] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ETL_SKIP_FORMAT>;
v[1160] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_OBJECT_CACHE_ENABLED>;
v[1161] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS>;
v[1162] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_THREADPOOL_SIZE>;
v[1163] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_THREADPOOL_MULTIPLIER>;
v[1164] = v;
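// LLAP security: Kerberos principals/keytabs, ZooKeeper secret-manager
// connection, registry namespace, and allow/deny ACLs.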
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_USE_KERBEROS>;
v[1165] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_KERBEROS_PRINCIPAL>;
v[1166] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_KERBEROS_KEYTAB_FILE>;
v[1167] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEBUI_SPNEGO_KEYTAB_FILE>;
v[1168] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEBUI_SPNEGO_PRINCIPAL>;
v[1169] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FS_KERBEROS_PRINCIPAL>;
v[1170] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FS_KERBEROS_KEYTAB_FILE>;
v[1171] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZKSM_ZK_CONNECTION_STRING>;
v[1172] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZKSM_ZK_SESSION_TIMEOUT>;
v[1173] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZK_REGISTRY_USER>;
v[1174] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZK_REGISTRY_NAMESPACE>;
v[1175] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SECURITY_ACL>;
v[1176] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SECURITY_ACL_DENY>;
v[1177] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_ACL>;
v[1178] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_ACL_DENY>;
v[1179] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_ACL>;
v[1180] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_ACL_DENY>;
v[1181] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_REMOTE_TOKEN_REQUIRES_SIGNING>;
v[1182] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DELEGATION_TOKEN_LIFETIME>;
v[1183] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_RPC_PORT>;
v[1184] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEB_AUTO_AUTH>;
v[1185] = v;
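// LLAP daemon runtime: RPC handlers/ports, YARN container sizing, work dirs,
// executor counts, AM liveness heartbeats, and memory headroom/oversubscription.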
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_RPC_NUM_HANDLERS>;
v[1186] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_RPC_PORT>;
v[1187] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_RPC_NUM_HANDLERS>;
v[1188] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_HDFS_PACKAGE_DIR>;
v[1189] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WORK_DIRS>;
v[1190] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_YARN_SHUFFLE_PORT>;
v[1191] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_YARN_CONTAINER_MB>;
v[1192] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_QUEUE_NAME>;
v[1193] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_CONTAINER_ID>;
v[1194] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NM_ADDRESS>;
v[1195] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED>;
v[1196] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS>;
v[1197] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS>;
v[1198] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_USE_FQDN>;
v[1199] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_EXEC_USE_FQDN>;
v[1200] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS>;
v[1201] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS>;
v[1202] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NUM_EXECUTORS>;
v[1203] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR>;
v[1204] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY>;
v[1205] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL>;
v[1206] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_REPORTER_MAX_THREADS>;
v[1207] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_RPC_PORT>;
v[1208] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_MEMORY_PER_INSTANCE_MB>;
v[1209] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_XMX_HEADROOM>;
v[1210] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_VCPUS_PER_INSTANCE>;
v[1211] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NUM_FILE_CLEANER_THREADS>;
v[1212] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FILE_CLEANUP_DELAY_SECONDS>;
v[1213] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SERVICE_HOSTS>;
v[1214] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SERVICE_REFRESH_INTERVAL>;
v[1215] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_COMMUNICATOR_NUM_THREADS>;
v[1216] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_CLIENT_NUM_THREADS>;
v[1217] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS>;
v[1218] = v;
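// LLAP AM task scheduler and node health checks: daemon-metrics collection,
// node re-enable timeouts and backoff factor, preemption, and locality delay.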
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_COLLECT_DAEMON_METRICS_MS>;
v[1219] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_COLLECT_DAEMON_METRICS_LISTENER>;
v[1220] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MINTASKS>;
v[1221] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MININTERVALDURATION>;
v[1222] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_TASKTIMERATIO>;
v[1223] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_EXECUTORRATIO>;
v[1224] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MAXNODES>;
v[1225] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME>;
v[1226] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_PRINCIPAL>;
v[1227] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_KEYTAB_FILE>;
v[1228] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS>;
v[1229] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS>;
v[1230] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR>;
v[1231] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_PREEMPT_INDEPENDENT>;
v[1232] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE>;
v[1233] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_LOCALITY_DELAY>;
v[1234] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS>;
v[1235] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE>;
v[1236] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME>;
v[1237] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION>;
v[1238] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_TIMED_WINDOW_AVERAGE_DATA_POINTS>;
v[1239] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_TIMED_WINDOW_AVERAGE_WINDOW_LENGTH>;
v[1240] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_SIMPLE_AVERAGE_DATA_POINTS>;
v[1241] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS>;
v[1242] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT>;
v[1243] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAX_CONCURRENT_REQUESTS_PER_NODE>;
v[1244] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS>;
v[1245] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_UMBILICAL_SERVER_PORT>;
v[1246] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_PORT>;
v[1247] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_SSL>;
v[1248] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_XFRAME_ENABLED>;
v[1249] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_XFRAME_VALUE>;
v[1250] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CLIENT_CONSISTENT_SPLITS>;
v[1251] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SPLIT_LOCATION_PROVIDER_CLASS>;
v[1252] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_VALIDATE_ACLS>;
v[1253] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_PORT>;
v[1254] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT>;
v[1255] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE>;
v[1256] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES>;
v[1257] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_SPLITS_TEMP_TABLE_STORAGE_FORMAT>;
v[1258] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_USE_HYBRID_CALENDAR>;
v[1259] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_DEPLOYMENT_SETUP_ENABLED>;
v[1260] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_RPC_PORT>;
v[1261] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_OUTPUT_SERVICE_PORT>;
v[1262] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_JWT_SHARED_SECRET_PROVIDER>;
v[1263] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_JWT_SHARED_SECRET>;
v[1264] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ENABLE_GRACE_JOIN_IN_LLAP>;
v[1265] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_HS2_ENABLE_COORDINATOR>;
v[1266] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_LOGGER>;
v[1267] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_OUTPUT_FORMAT_ARROW>;
v[1268] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_COLLECT_LOCK_METRICS>;
v[1269] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_TIME_SUMMARY>;
v[1270] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRIGGER_VALIDATION_INTERVAL>;
v[1271] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NWAYJOINREORDER>;
v[1272] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MERGE_NWAY_JOINS>;
v[1273] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_N_RECORDS>;
v[1274] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_PATH_VALIDATION>;
v[1275] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_REPAIR_BATCH_SIZE>;
v[1276] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES>;
v[1277] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LLAP_CONCURRENT_QUERIES>;
v[1278] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_ENABLE_MEMORY_MANAGER>;
v[1279] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HASH_TABLE_INFLATION_FACTOR>;
v[1280] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_TRACE_ID>;
v[1281] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MM_AVOID_GLOBSTATUS_ON_S3>;
v[1282] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_RESTRICTED_LIST>;
v[1283] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_HIDDEN_LIST>;
v[1284] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_INTERNAL_VARIABLE_LIST>;
v[1285] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_LENGTH>;
v[1286] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_TIMEOUT_SECONDS>;
v[1287] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPUTE_SPLITS_NUM_THREADS>;
v[1288] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_INPUT_LISTING_MAX_THREADS>;
v[1289] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_ENABLED>;
v[1290] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STRATEGIES>;
v[1291] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_PERSISTENCE>;
v[1292] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MAX_RETRYSNAPSHOT_COUNT>;
v[1293] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_REEXECUTION_COUNT>;
v[1294] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS>;
v[1295] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_CACHE_BATCH_SIZE>;
v[1296] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_CACHE_SIZE>;
v[1297] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_PLANMAPPER_LINK_RELNODES>;
v[1298] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_RECOMPILATION_COUNT>;
v[1299] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_ENABLED>;
v[1300] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_NAMESPACE>;
v[1301] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_IDLE_SLEEP_TIME>;
v[1302] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_PROGRESS_REPORT_INTERVAL>;
v[1303] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_CREATE_AS_ENABLED>;
v[1304] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_AUTHORIZATION_SCHEDULED_QUERIES_SUPPORTED>;
v[1305] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_MAX_EXECUTORS>;
v[1306] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_CLEANUP_SERVICE_THREAD_COUNT>;
v[1307] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_CLEANUP_SERVICE_QUEUE_SIZE>;
v[1308] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_ENABLED>;
v[1309] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_NONTRANSACTIONAL_TABLES_ENABLED>;
v[1310] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS>;
v[1311] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_DIRECTORY>;
v[1312] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_LIFETIME>;
v[1313] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_SIZE>;
v[1314] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_SIZE>;
v[1315] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NOTFICATION_EVENT_POLL_INTERVAL>;
v[1316] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NOTFICATION_EVENT_CONSUMERS>;
v[1317] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DESCRIBE_PARTITIONED_TABLE_IGNORE_STATS>;
v[1318] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ICEBERG_METADATA_GENERATOR_THREADS>;
v[1319] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_SUPPORTED_SCHEMES>;
v[1320] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_USE_BLOBSTORE_AS_SCRATCHDIR>;
v[1321] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED>;
v[1322] = v;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_CONFIG_FILES>;
v[1323] = v;
return v;
}
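/* Illustrative sketch (editor-added, not decompiler output): the method above fills the
   backing array for ConfVars.values() so that each constant sits at its ordinal index.
   Client code normally goes through the enum API rather than this array; `varname` is the
   real public field of HiveConf.ConfVars holding the hive.* key:

     for (org.apache.hadoop.hive.conf.HiveConf.ConfVars c :
              org.apache.hadoop.hive.conf.HiveConf.ConfVars.values()) {
         System.out.println(c.ordinal() + " -> " + c.varname);
     }
     // for this build of the class, prints e.g. "1208 -> hive.llap.daemon.rpc.port"
     // for LLAP_DAEMON_RPC_PORT
*/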
static void <clinit>()
{
java.util.EnumSet v;
org.apache.hadoop.hive.conf.Validator$PatternSet v;
org.apache.hadoop.hive.conf.HiveConf$ResultFileFormat v;
org.apache.hadoop.hive.conf.Validator$SizeValidator v;
org.apache.hadoop.hive.conf.Validator$RatioValidator v;
org.apache.hadoop.hive.conf.Validator$StringSet v;
java.lang.Integer v;
java.lang.Float v;
java.lang.String v;
org.apache.hadoop.hive.conf.Validator$WritableDirectoryValidator v;
java.util.concurrent.TimeUnit v;
java.lang.String[] v;
org.apache.hadoop.hive.conf.HiveConf$ConfVars[] v;
int v;
java.lang.Boolean v;
java.lang.Long v;
org.apache.hadoop.hive.conf.HiveServer2TransportMode v;
org.apache.hadoop.hive.conf.Validator$RangeValidator v;
org.apache.hadoop.hive.conf.Validator$TimeValidator v;
org.apache.hadoop.hive.conf.HiveConf$ConfVars v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("MSC_CACHE_ENABLED", 0, "hive.metastore.client.cache.v2.enabled", v, "This property enables a Caffeine Cache for Metastore client");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("MSC_CACHE_MAX_SIZE", 1, "hive.metastore.client.cache.v2.maxSize", "1Gb", v, "Set the maximum size (number of bytes) of the metastore client cache (DEFAULT: 1GB). Only in effect when the cache is enabled");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_MAX_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("MSC_CACHE_RECORD_STATS", 2, "hive.metastore.client.cache.v2.recordStats", v, "This property enables recording metastore client cache stats in DEBUG logs");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MSC_CACHE_RECORD_STATS> = v;
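/* Illustrative sketch (editor-added): reading the three metastore-client-cache settings
   defined above through the public HiveConf accessors (getBoolVar and getVar are real
   HiveConf methods; the setup itself is hypothetical):

     import org.apache.hadoop.hive.conf.HiveConf;

     HiveConf conf = new HiveConf();
     boolean cacheEnabled = conf.getBoolVar(HiveConf.ConfVars.MSC_CACHE_ENABLED);
     String  maxSize      = conf.getVar(HiveConf.ConfVars.MSC_CACHE_MAX_SIZE);      // default "1Gb"
     boolean recordStats  = conf.getBoolVar(HiveConf.ConfVars.MSC_CACHE_RECORD_STATS);
*/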
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SCRIPTWRAPPER", 3, "hive.exec.script.wrapper", null, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRIPTWRAPPER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("PLAN", 4, "hive.exec.plan", "", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars PLAN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("STAGINGDIR", 5, "hive.exec.stagingdir", ".hive-staging", "Directory name that will be created inside table locations in order to support HDFS encryption. This replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars STAGINGDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SCRATCHDIR", 6, "hive.exec.scratchdir", "/tmp/hive", "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, with ${hive.scratch.dir.permission}.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRATCHDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPLDIR", 7, "hive.repl.rootdir", "/user/${system:user.name}/repl/", "HDFS root dir for all replication dumps.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPLCMENABLED", 8, "hive.repl.cm.enabled", v, "Turn on ChangeManager, so deleted files will go to cmrootdir.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPLCMDIR", 9, "hive.repl.cmrootdir", "/user/${system:user.name}/cmroot/", "Root dir for ChangeManager, used for deleted files.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit DAYS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPLCMRETIAN", 10, "hive.repl.cm.retain", "10d", v, "Time to retain removed files in cmrootdir.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMRETIAN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPLCMENCRYPTEDDIR", 11, "hive.repl.cm.encryptionzone.rootdir", ".cmroot", "Root dir for ChangeManager if encryption zones are enabled, used for deleted files.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMENCRYPTEDDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPLCMFALLBACKNONENCRYPTEDDIR", 12, "hive.repl.cm.nonencryptionzone.rootdir", "", "Root dir for ChangeManager for non encrypted paths if hive.repl.cmrootdir is encrypted.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMFALLBACKNONENCRYPTEDDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPLCMINTERVAL", 13, "hive.repl.cm.interval", "3600s", v, "Interval for cmroot cleanup thread.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPLCMINTERVAL> = v;
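/* Illustrative sketch (editor-added): REPLCMRETIAN and REPLCMINTERVAL above carry string
   defaults ("10d", "3600s") guarded by a TimeValidator. HiveConf can resolve such values
   into a unit of the caller's choice via getTimeVar, a real HiveConf method:

     import java.util.concurrent.TimeUnit;
     import org.apache.hadoop.hive.conf.HiveConf;

     HiveConf conf = new HiveConf();
     long retainDays   = conf.getTimeVar(HiveConf.ConfVars.REPLCMRETIAN, TimeUnit.DAYS);      // 10
     long intervalSecs = conf.getTimeVar(HiveConf.ConfVars.REPLCMINTERVAL, TimeUnit.SECONDS); // 3600
*/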
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE", 14, "hive.repl.ha.datapath.replace.remote.nameservice", v, "When HDFS is HA enabled and both source and target clusters are configured with same nameservice name, enable this flag and provide a new unique logical name for representing the remote cluster nameservice using config \'hive.repl.ha.datapath.replace.remote.nameservice.name\'.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE_NAME", 15, "hive.repl.ha.datapath.replace.remote.nameservice.name", null, "When HDFS is HA enabled and both source and target clusters are configured with same nameservice name, use this config to provide a unique logical name for nameservice on the remote cluster (should be different from nameservice name on the local cluster)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_FUNCTIONS_ROOT_DIR", 16, "hive.repl.replica.functions.root.dir", "/user/${system:user.name}/repl/functions/", "Root directory on the replica warehouse where the repl sub-system will store jars from the primary warehouse");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_FUNCTIONS_ROOT_DIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_APPROX_MAX_LOAD_TASKS", 17, "hive.repl.approx.max.load.tasks", v, "Provide an approximation of the maximum number of tasks that should be executed before \ndynamically generating the next set of tasks. The number is approximate as Hive \nwill stop at a slightly higher number, the reason being some events might lead to a \ntask increment that would cross the specified limit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_APPROX_MAX_LOAD_TASKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_PARTITIONS_DUMP_PARALLELISM", 18, "hive.repl.partitions.dump.parallelism", v, "Number of threads that will be used to dump partition data information during repl dump.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_PARTITIONS_DUMP_PARALLELISM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_RUN_DATA_COPY_TASKS_ON_TARGET", 19, "hive.repl.run.data.copy.tasks.on.target", v, "Indicates whether replication should run data copy tasks during repl load operation.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RUN_DATA_COPY_TASKS_ON_TARGET> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_DUMP_METADATA_ONLY", 20, "hive.repl.dump.metadata.only", v, "Indicates whether the replication dump contains only metadata information or data + metadata. \nThis config makes the hive.repl.include.external.tables config ineffective.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_METADATA_ONLY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_RETAIN_PREV_DUMP_DIR", 21, "hive.repl.retain.prev.dump.dir", v, "If this is set to false, then all previously used dump-directories will be deleted after repl-dump. If true, the number of latest dump-directories specified by hive.repl.retain.prev.dump.dir.count will be retained.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_PREV_DUMP_DIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(3);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_RETAIN_PREV_DUMP_DIR_COUNT", 22, "hive.repl.retain.prev.dump.dir.count", v, "Indicates the maximum number of latest previously used dump-directories which would be retained when hive.repl.retain.prev.dump.dir is set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_PREV_DUMP_DIR_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET", 23, "hive.repl.retain.custom.db.locations.on.target", v, "Indicates whether custom warehouse locations of the source database, if any, should be retained on the target as well.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_INCLUDE_MATERIALIZED_VIEWS", 24, "hive.repl.include.materialized.views", v, "Indicates whether replication of materialized views is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_MATERIALIZED_VIEWS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_DUMP_SKIP_IMMUTABLE_DATA_COPY", 25, "hive.repl.dump.skip.immutable.data.copy", v, "Indicates whether the replication dump can skip the copy task and refer to the \noriginal path instead. This would retain all table and partition metadata.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_SKIP_IMMUTABLE_DATA_COPY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE", 26, "hive.repl.dump.metadata.only.for.external.table", v, "Indicates whether an external table replication dump contains only metadata information or data + metadata");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_BOOTSTRAP_ACID_TABLES", 27, "hive.repl.bootstrap.acid.tables", v, "Indicates if repl dump should bootstrap the information about ACID tables along with \nincremental dump for replication. It is recommended to always keep this config parameter \nas false; it should be set to true only via the WITH clause of the REPL DUMP \ncommand. It should be set to true only once for incremental repl dump on \neach of the existing replication policies after enabling acid tables replication.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_ACID_TABLES> = v;
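/* Illustrative sketch (editor-added): the description above says hive.repl.bootstrap.acid.tables
   should be enabled only via the WITH clause of the REPL DUMP command. A hedged JDBC example;
   the connection URL and database name are hypothetical:

     import java.sql.Connection;
     import java.sql.DriverManager;
     import java.sql.Statement;

     try (Connection con = DriverManager.getConnection("jdbc:hive2://hs2-host:10000/default");
          Statement st = con.createStatement()) {
         // one-time bootstrap of ACID tables inside an otherwise incremental policy
         st.execute("REPL DUMP repl_db WITH ('hive.repl.bootstrap.acid.tables'='true')");
     }
*/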
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit HOURS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT", 28, "hive.repl.bootstrap.dump.open.txn.timeout", "1h", v, "Indicates the timeout for all transactions which are opened before triggering bootstrap REPL DUMP. If these open transactions are not closed within the timeout value, then REPL DUMP will forcefully abort those transactions and continue with bootstrap dump.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_BOOTSTRAP_DUMP_ABORT_WRITE_TXN_AFTER_TIMEOUT", 29, "hive.repl.bootstrap.dump.abort.write.txn.after.timeout", v, "Indicates whether to abort write transactions belonging to the db under replication while doing a bootstrap dump after the timeout configured by hive.repl.bootstrap.dump.open.txn.timeout. If set to false, bootstrap dump will fail.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_DUMP_ABORT_WRITE_TXN_AFTER_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_ADD_RAW_RESERVED_NAMESPACE", 30, "hive.repl.add.raw.reserved.namespace", v, "For TDE with same encryption keys on source and target, allow Distcp super user to access \nthe raw bytes from filesystem without decrypting on source and then encrypting on target.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ADD_RAW_RESERVED_NAMESPACE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_INCLUDE_EXTERNAL_TABLES", 31, "hive.repl.include.external.tables", v, "Indicates if repl dump should include information about external tables. It should be \nused in conjunction with \'hive.repl.dump.metadata.only\' set to false. If \'hive.repl.dump.metadata.only\' \nis set to true then this config parameter has no effect, as external table metadata is always flushed \nby default. If this config parameter is enabled on an on-going replication policy which is in the \nincremental phase, then \'hive.repl.bootstrap.external.tables\' needs to be set to true for the first \nrepl dump to bootstrap all external tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_EXTERNAL_TABLES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_BOOTSTRAP_EXTERNAL_TABLES", 32, "hive.repl.bootstrap.external.tables", v, "Indicates if repl dump should bootstrap the information about external tables along with incremental \ndump for replication. It is recommended to always keep this config parameter as false; it should be \nset to true only via the WITH clause of the REPL DUMP command. It should be used in conjunction with \n\'hive.repl.include.external.tables\' when set to true. If \'hive.repl.include.external.tables\' is \nset to false, then this config parameter has no effect. It should be set to true only once for \nincremental repl dump on each existing replication policy after enabling external tables replication.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_BOOTSTRAP_EXTERNAL_TABLES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_EXTERNAL_TABLE_BASE_DIR", 33, "hive.repl.replica.external.table.base.dir", null, "This is the fully qualified base directory on the target/replica warehouse under which data for external tables is stored. This is a relative base path and is hence prefixed to the source external table path on the target cluster.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_TABLE_BASE_DIR> = v;
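/* Illustrative sketch (editor-added): the description above says the external table base dir
   is prefixed to the source table path on the target cluster. A minimal illustration of that
   documented prefixing, assuming plain path concatenation; the paths and the computation are
   hypothetical, not Hive's actual resolution code:

     import org.apache.hadoop.fs.Path;

     Path baseDir    = new Path("/replica/ext_base");      // hive.repl.replica.external.table.base.dir
     Path sourcePath = new Path("/data/warehouse/ext/t1"); // external table location on the source
     // strip the leading "/" so the source path becomes relative to the base dir
     Path targetPath = new Path(baseDir, sourcePath.toUri().getPath().substring(1));
     // targetPath -> /replica/ext_base/data/warehouse/ext/t1
*/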
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK", 34, "hive.repl.external.warehouse.single.copy.task", v, "Whether to create a single copy task for all the external tables within the default database location for external tables. This would require more memory for preparing the initial listing, so it should be used only if the memory requirements can be fulfilled. If any specific configuration needs to be passed for these copy tasks, it can be specified using the prefix hive.dbpath.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK_PATHS", 35, "hive.repl.external.warehouse.single.copy.task.paths", "", "Comma separated list of paths for which a single copy task shall be created for all the external tables within those locations. This would require more memory for preparing the initial listing, so it should be used only if the memory requirements can be fulfilled. If a directory contains data not part of the database, that data would also get copied, so only locations that contain tables belonging solely to the same database should be provided. This has no effect in case of table level replication or if hive.repl.bootstrap.external.tables isn\'t enabled. If any specific configuration needs to be passed for these copy tasks, it can be specified using the prefix hive.dbpath.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK_PATHS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_INCLUDE_AUTHORIZATION_METADATA", 36, "hive.repl.include.authorization.metadata", v, "This configuration will enable replication of security and authorization related metadata along with the Hive data and metadata.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_AUTHORIZATION_METADATA> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_AUTHORIZATION_PROVIDER_SERVICE", 37, "hive.repl.authorization.provider.service", "ranger", "This configuration defines which service will provide the security and authorization related metadata that needs to be replicated along with the Hive data and metadata. Set the configuration hive.repl.include.authorization.metadata to false to disable replication of security policies.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_AUTHORIZATION_PROVIDER_SERVICE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_RANGER_HANDLE_DENY_POLICY_TARGET", 38, "hive.repl.handle.ranger.deny.policy", v, "Indicates whether ranger deny policy for target database should be handled automatically by hive or not.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_HANDLE_DENY_POLICY_TARGET> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_REPL_FAILOVER_START", 39, "hive.repl.failover.start", v, "A replication policy level config to indicate if user wants to initiate fail-over to replicate the database in reverse direction.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REPL_FAILOVER_START> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_RANGER_ADD_DENY_POLICY_TARGET", 40, "hive.repl.ranger.target.deny.policy", v, "This configuration will add a deny policy on the target database for all users except hive to avoid any update to the target database. Effective only if hive.repl.handle.ranger.deny.policy is set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_ADD_DENY_POLICY_TARGET> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPL_RANGER_CLIENT_READ_TIMEOUT", 41, "hive.repl.ranger.client.read.timeout", "300s", v, "Ranger client read timeout for Ranger REST API calls.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RANGER_CLIENT_READ_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_INCLUDE_ATLAS_METADATA", 42, "hive.repl.include.atlas.metadata", v, "Indicates if Atlas metadata should be replicated along with Hive data and metadata or not.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_INCLUDE_ATLAS_METADATA> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_ATLAS_ENDPOINT", 43, "hive.repl.atlas.endpoint", null, "Atlas endpoint of the current cluster hive database is getting replicated from/to.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_ENDPOINT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_ATLAS_REPLICATED_TO_DB", 44, "hive.repl.atlas.replicatedto", null, "Target hive database name Atlas metadata of source hive database is being replicated to.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_REPLICATED_TO_DB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPL_ATLAS_CLIENT_READ_TIMEOUT", 45, "hive.repl.atlas.client.read.timeout", "7200s", v, "Atlas client read timeout for Atlas REST API calls.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_ATLAS_CLIENT_READ_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPL_EXTERNAL_CLIENT_CONNECT_TIMEOUT", 46, "hive.repl.external.client.connect.timeout", "10s", v, "Client connect timeout for REST API calls to external service.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_EXTERNAL_CLIENT_CONNECT_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_SOURCE_CLUSTER_NAME", 47, "hive.repl.source.cluster.name", null, "Name of the source cluster for the replication.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SOURCE_CLUSTER_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_TARGET_CLUSTER_NAME", 48, "hive.repl.target.cluster.name", null, "Name of the target cluster for the replication.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_TARGET_CLUSTER_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPL_RETRY_INTIAL_DELAY", 49, "hive.repl.retry.initial.delay", "60s", v, "Initial delay before retry starts.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_INTIAL_DELAY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.2F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_RETRY_BACKOFF_COEFFICIENT", 50, "hive.repl.retry.backoff.coefficient", v, "The backoff coefficient for exponential retry delay between retries. Previous Delay * Backoff Coefficient will determine the next retry interval");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_BACKOFF_COEFFICIENT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPL_RETRY_JITTER", 51, "hive.repl.retry.jitter", "30s", v, "A random jitter to be applied to avoid all retries happening at the same time.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_JITTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MINUTES>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPL_RETRY_MAX_DELAY_BETWEEN_RETRIES", 52, "hive.repl.retry.max.delay.between.retries", "60m", v, "Maximum allowed retry delay in minutes after including exponential backoff. If this limit is reached, retry will continue with this retry duration.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_MAX_DELAY_BETWEEN_RETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit HOURS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("REPL_RETRY_TOTAL_DURATION", 53, "hive.repl.retry.total.duration", "24h", v, "Total allowed retry duration in hours inclusive of all retries. Once this is exhausted, the policy instance will be marked as failed and will need manual intervention to restart.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_RETRY_TOTAL_DURATION> = v;
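/* Minimal sketch of how the retry settings above compose, assuming the semantics
 * stated in their descriptions (exponential backoff capped by the max delay, a random
 * jitter, and an overall time budget); this is an illustration, not Hive's code path:
 *
 *   long delayMs = 60_000L;              // hive.repl.retry.initial.delay = 60s
 *   final double backoff = 1.2d;         // hive.repl.retry.backoff.coefficient
 *   final long maxDelayMs = 3_600_000L;  // hive.repl.retry.max.delay.between.retries = 60m
 *   final long jitterBoundMs = 30_000L;  // hive.repl.retry.jitter = 30s
 *   long budgetMs = 86_400_000L;         // hive.repl.retry.total.duration = 24h
 *   java.util.Random rnd = new java.util.Random();
 *   while (budgetMs > 0) {
 *     long wait = Math.min(delayMs, maxDelayMs) + rnd.nextInt((int) jitterBoundMs);
 *     // ...sleep(wait), then retry the failed replication step; on success, break...
 *     budgetMs -= wait;
 *     delayMs = (long) (delayMs * backoff);
 *   }
 *   // once budgetMs is exhausted, the policy instance is marked failed (see the
 *   // REPL_RETRY_TOTAL_DURATION description above)
 */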
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_COPY_FILE_LIST_ITERATOR_RETRY", 54, "hive.repl.copy.file.list.iterator.retry", v, "Determines whether writes happen with retry upon encountering filesystem errors for data-copy \niterator files. It should be disabled when we do not want retry on a per-line basis while writing \nto the files and in cases when flushing capabilities are not available on the stream. If disabled, then retry \nis only attempted during file creation, not for errors encountered while writing entries.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_COPY_FILE_LIST_ITERATOR_RETRY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_LOAD_PARTITIONS_BATCH_SIZE", 55, "hive.repl.load.partitions.batch.size", v, "Provide the maximum number of partitions of a table that will be batched together during  \nrepl load. All the partitions in a batch will make a single metastore call to update the metadata. \nThe data for these partitions will be copied before copying the metadata batch. ");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_LOAD_PARTITIONS_BATCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_LOAD_PARTITIONS_WITH_DATA_COPY_BATCH_SIZE", 56, "hive.repl.load.partitions.with.data.copy.batch.size", v, "Provide the maximum number of partitions of a table that will be batched together during  \nrepl load. All the partitions in a batch will make a single metastore call to update the metadata. \nThe data for these partitions will be copied before copying the metadata batch. ");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_LOAD_PARTITIONS_WITH_DATA_COPY_BATCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_PARALLEL_COPY_TASKS", 57, "hive.repl.parallel.copy.tasks", v, "Provide the maximum number of parallel copy operations (distcp or regular copy) launched for a table \nor partition. This will create at most 100 threads which will run copy in parallel for the data files at \ntable or partition level. If hive.exec.parallel \nis set to true, then the max worker threads created for copy can be hive.exec.parallel.thread.number (determines \nthe number of copy tasks in parallel) * hive.repl.parallel.copy.tasks.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_PARALLEL_COPY_TASKS> = v;
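/* Worked example of the interplay the description spells out, using the default
 * registered here (100) and the default of hive.exec.parallel.thread.number (8,
 * registered further below as EXECPARALLETHREADNUMBER):
 *
 *   int copyTasksPerTable = 100; // hive.repl.parallel.copy.tasks
 *   int parallelJobs = 8;        // hive.exec.parallel.thread.number, when hive.exec.parallel=true
 *   int maxCopyThreads = parallelJobs * copyTasksPerTable; // 800 copy threads at most
 */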
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_SNAPSHOT_DIFF_FOR_EXTERNAL_TABLE_COPY", 58, "hive.repl.externaltable.snapshotdiff.copy", v, "Use snapshot diff for copying data from the source to the destination cluster for external tables via distcp. If true, snapshot-based distcp is used for all the paths configured as part of hive.repl.external.warehouse.single.copy.task, along with the external warehouse default location.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SNAPSHOT_DIFF_FOR_EXTERNAL_TABLE_COPY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_SNAPSHOT_OVERWRITE_TARGET_FOR_EXTERNAL_TABLE_COPY", 59, "hive.repl.externaltable.snapshot.overwrite.target", v, "If this is enabled and the target is modified while snapshots are used for external table data copy, the target data is overwritten, the modifications are removed, and the copy is attempted again using the snapshot-based approach. If disabled, the replication will fail if the target is modified.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_SNAPSHOT_OVERWRITE_TARGET_FOR_EXTERNAL_TABLE_COPY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(5);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_STATS_TOP_EVENTS_COUNTS", 60, "hive.repl.stats.events.count", v, "Number of most expensive events that need to be maintained per event type for the replication statistics. The maximum permissible limit is 10.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_STATS_TOP_EVENTS_COUNTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = <java.io.File: java.lang.String separator>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("${system:java.io.tmpdir}\u0001${system:user.name}");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LOCALSCRATCHDIR", 61, "hive.exec.local.scratchdir", v, "Local scratch space for Hive jobs");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALSCRATCHDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = <java.io.File: java.lang.String separator>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("${system:java.io.tmpdir}\u0001${hive.session.id}_resources");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DOWNLOADED_RESOURCES_DIR", 62, "hive.downloaded.resources.dir", v, "Temporary local directory for added resources in the remote file system.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DOWNLOADED_RESOURCES_DIR> = v;
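/* The two dynamicinvoke recipes above are compiled string concatenations; the \u0001
 * byte in each recipe marks where the java.io.File.separator argument is spliced in.
 * Reconstructed as plain Java, the two defaults are:
 *
 *   String localScratchDir = "${system:java.io.tmpdir}" + java.io.File.separator
 *       + "${system:user.name}";
 *   String downloadedResourcesDir = "${system:java.io.tmpdir}" + java.io.File.separator
 *       + "${hive.session.id}_resources";
 */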
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SCRATCHDIRPERMISSION", 63, "hive.scratch.dir.permission", "700", "The permission for the user specific scratch directories that get created.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRATCHDIRPERMISSION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SUBMITVIACHILD", 64, "hive.exec.submitviachild", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SUBMITVIACHILD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SUBMITLOCALTASKVIACHILD", 65, "hive.exec.submit.local.task.via.child", v, "Determines whether local tasks (typically the mapjoin hashtable generation phase) run in a \nseparate JVM (true recommended) or not. \nRunning in the same JVM avoids the overhead of spawning a new JVM, but can lead to out-of-memory issues.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SUBMITLOCALTASKVIACHILD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SCRIPTERRORLIMIT", 66, "hive.exec.script.maxerrsize", v, "Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \nThis prevents runaway scripts from filling log partitions to capacity");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SCRIPTERRORLIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("ALLOWPARTIALCONSUMP", 67, "hive.exec.script.allow.partial.consumption", v, "When enabled, this option allows a user script to exit successfully without consuming \nall the data from the standard input.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars ALLOWPARTIALCONSUMP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("STREAMREPORTERPERFIX", 68, "stream.stderr.reporter.prefix", "reporter:", "Streaming jobs that log to standard error with this prefix can log counter or status information.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars STREAMREPORTERPERFIX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("STREAMREPORTERENABLED", 69, "stream.stderr.reporter.enabled", v, "Enable consumption of status and counter messages for streaming jobs.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars STREAMREPORTERENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("COMPRESSRESULT", 70, "hive.exec.compress.output", v, "This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed. \nThe compression codec and other options are determined from Hadoop config variables mapred.output.compress*");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSRESULT> = v;
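/* Usage sketch (an assumption-level illustration, not taken from this class): turning
 * on compressed final output and choosing a codec through the Hadoop-side keys the
 * description refers to; "mapred.output.compression.codec" is the legacy Hadoop name:
 *
 *   HiveConf conf = new HiveConf();
 *   conf.setBoolVar(HiveConf.ConfVars.COMPRESSRESULT, true);
 *   conf.set("mapred.output.compression.codec",
 *       "org.apache.hadoop.io.compress.GzipCodec");
 */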
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("COMPRESSINTERMEDIATE", 71, "hive.exec.compress.intermediate", v, "This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \nThe compression codec and other options are determined from Hadoop config variables mapred.output.compress*");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("COMPRESSINTERMEDIATECODEC", 72, "hive.intermediate.compression.codec", "", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATECODEC> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("COMPRESSINTERMEDIATETYPE", 73, "hive.intermediate.compression.type", "", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPRESSINTERMEDIATETYPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(256000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("BYTESPERREDUCER", 74, "hive.exec.reducers.bytes.per.reducer", v, "Size per reducer. The default is 256MB, i.e. if the input size is 1G, Hive will use 4 reducers.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars BYTESPERREDUCER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1009);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("MAXREDUCERS", 75, "hive.exec.reducers.max", v, "Maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is\nnegative, Hive will use this as the maximum number of reducers when automatically determining the number of reducers.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAXREDUCERS> = v;
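/* Rough sketch of how the two settings above interact when Hive estimates reducer
 * parallelism (simplified; the real heuristic lives in the query planner):
 *
 *   long bytesPerReducer = 256_000_000L; // hive.exec.reducers.bytes.per.reducer
 *   int maxReducers = 1009;              // hive.exec.reducers.max
 *   long inputBytes = 1_000_000_000L;    // ~1G of input
 *   int reducers = (int) Math.min(
 *       (inputBytes + bytesPerReducer - 1) / bytesPerReducer, // ceiling division -> 4
 *       maxReducers);                    // matches the "4 reducers" example above
 */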
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("PREEXECHOOKS", 76, "hive.exec.pre.hooks", "", "Comma-separated list of pre-execution hooks to be invoked for each statement. \nA pre-execution hook is specified as the name of a Java class which implements the \norg.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars PREEXECHOOKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("POSTEXECHOOKS", 77, "hive.exec.post.hooks", "", "Comma-separated list of post-execution hooks to be invoked for each statement. \nA post-execution hook is specified as the name of a Java class which implements the \norg.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars POSTEXECHOOKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("ONFAILUREHOOKS", 78, "hive.exec.failure.hooks", "", "Comma-separated list of on-failure hooks to be invoked for each statement. \nAn on-failure hook is specified as the name of a Java class which implements the \norg.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars ONFAILUREHOOKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("QUERYREDACTORHOOKS", 79, "hive.exec.query.redactor.hooks", "", "Comma-separated list of hooks to be invoked for each query which can \ntransform the query before it\'s placed in the job.xml file. Must be a Java class which \nextends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars QUERYREDACTORHOOKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("CLIENTSTATSPUBLISHERS", 80, "hive.client.stats.publishers", "", "Comma-separated list of statistics publishers to be invoked on counters on each job. \nA client stats publisher is specified as the name of a Java class which implements the \norg.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIENTSTATSPUBLISHERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("BASICSTATSTASKSMAXTHREADSFACTOR", 81, "hive.basic.stats.max.threads.factor", v, "Determines the maximum number of threads that can be used for collection of file level statistics. If the value configured is x, then the maximum number of threads that can be used is x multiplied by the number of available processors. A value of less than 1 makes stats collection sequential.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars BASICSTATSTASKSMAXTHREADSFACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("EXECPARALLEL", 82, "hive.exec.parallel", v, "Whether to execute jobs in parallel");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars EXECPARALLEL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(8);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("EXECPARALLETHREADNUMBER", 83, "hive.exec.parallel.thread.number", v, "How many jobs at most can be executed in parallel");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars EXECPARALLETHREADNUMBER> = v;
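/* Usage sketch with the standard HiveConf setters (the value 16 is illustrative):
 *
 *   HiveConf conf = new HiveConf();
 *   conf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, true);         // hive.exec.parallel
 *   conf.setIntVar(HiveConf.ConfVars.EXECPARALLETHREADNUMBER, 16); // hive.exec.parallel.thread.number
 */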
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESPECULATIVEEXECREDUCERS", 84, "hive.mapred.reduce.tasks.speculative.execution", v, "(Deprecated) Whether speculative execution for reducers should be turned on. ");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESPECULATIVEEXECREDUCERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECOUNTERSPULLINTERVAL", 85, "hive.exec.counters.pull.interval", v, "The interval with which to poll the JobTracker for the counters of the running job. \nThe smaller it is, the more load there will be on the JobTracker; the higher it is, the less granular the counter updates will be.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTERSPULLINTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DYNAMICPARTITIONING", 86, "hive.exec.dynamic.partition", v, "Whether or not to allow dynamic partitions in DML/DDL.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "strict";
v[1] = "nonstrict";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("DYNAMICPARTITIONINGMODE", 87, "hive.exec.dynamic.partition.mode", "nonstrict", v, "In strict mode, the user must specify at least one static partition\nin case the user accidentally overwrites all partitions.\nIn nonstrict mode all partitions are allowed to be dynamic.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONINGMODE> = v;
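/* Usage sketch: the StringSet validator above restricts the value to "strict" or
 * "nonstrict"; any other string fails validation. Typically toggled per session:
 *
 *   HiveConf conf = new HiveConf();
 *   conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
 *   // equivalent to the HiveQL statement: SET hive.exec.dynamic.partition.mode=nonstrict;
 */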
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DYNAMICPARTITIONMAXPARTS", 88, "hive.exec.max.dynamic.partitions", v, "Maximum number of dynamic partitions allowed to be created in total.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONMAXPARTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DYNAMICPARTITIONMAXPARTSPERNODE", 89, "hive.exec.max.dynamic.partitions.pernode", v, "Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONMAXPARTSPERNODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DYNAMICPARTITIONCONVERT", 90, "hive.exec.dynamic.partition.type.conversion", v, "Whether to check and cast a dynamic partition column before creating the partition directory. For example, if partition p is type int and we insert string \'001\', then if this value is true, directory p=1 will be created; if false, p=001");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DYNAMICPARTITIONCONVERT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(100000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("MAXCREATEDFILES", 91, "hive.exec.max.created.files", v, "Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAXCREATEDFILES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DEFAULTPARTITIONNAME", 92, "hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__", "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \nThis value must not contain any special character used in HDFS URI (e.g., \':\', \'%\', \'/\' etc). \nThe user has to be aware that the dynamic partition value should not contain this value to avoid confusion.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DEFAULTPARTITIONNAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DEFAULT_ZOOKEEPER_PARTITION_NAME", 93, "hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DEFAULT_ZOOKEEPER_PARTITION_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SHOW_JOB_FAIL_DEBUG_INFO", 94, "hive.exec.show.job.failure.debug.info", v, "If a job fails, whether to provide a link in the CLI to the task with the\nmost failures, along with debugging hints if applicable.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SHOW_JOB_FAIL_DEBUG_INFO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("JOB_DEBUG_CAPTURE_STACKTRACES", 95, "hive.exec.job.debug.capture.stacktraces", v, "Whether or not stack traces parsed from the task logs of a sampled failed task \nfor each failed job should be stored in the SessionState");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars JOB_DEBUG_CAPTURE_STACKTRACES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(30000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("JOB_DEBUG_TIMEOUT", 96, "hive.exec.job.debug.timeout", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars JOB_DEBUG_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(20000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TASKLOG_DEBUG_TIMEOUT", 97, "hive.exec.tasklog.debug.timeout", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TASKLOG_DEBUG_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("OUTPUT_FILE_EXTENSION", 98, "hive.output.file.extension", null, "String used as a file extension for output files. \nIf not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars OUTPUT_FILE_EXTENSION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_IN_TEST", 99, "hive.in.test", v, "internal usage only, true in test mode", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_IN_TEST_ICEBERG", 100, "hive.in.iceberg.test", v, "internal usage only, true when testing iceberg", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_ICEBERG> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_IN_TEST_SSL", 101, "hive.in.ssl.test", v, "internal usage only, true in SSL test mode", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_SSL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_IN_TEST_REPL", 102, "hive.in.repl.test", v, "internal usage only, true in replication test mode", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_REPL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_IN_TEST_IDE", 103, "hive.in.ide.test", v, "internal usage only, true if test running in ide", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEST_IDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TESTING_SHORT_LOGS", 104, "hive.testing.short.logs", v, "internal usage only, used only in test mode. If set to true, the short version of the operation logs (generated by LogDivertAppenderForTest) will be returned when the operation logs are requested");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TESTING_SHORT_LOGS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TESTING_REMOVE_LOGS", 105, "hive.testing.remove.logs", v, "internal usage only, used only in test mode. If set to false, the operation logs and the operation log directory will not be removed, so they can be found after the test runs.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TESTING_REMOVE_LOGS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TEST_LOAD_HOSTNAMES", 106, "hive.test.load.hostnames", "", "Specify host names for load testing (e.g., \"host1,host2,host3\"). Leave it empty if no load generation is needed (e.g., for production).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_HOSTNAMES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_TEST_LOAD_INTERVAL", 107, "hive.test.load.interval", "10ms", v, "The interval length used for load and idle periods in milliseconds.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.2F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TEST_LOAD_UTILIZATION", 108, "hive.test.load.utilization", v, "Specify processor load utilization between 0.0 (not loaded on all threads) and 1.0 (fully loaded on all threads). Comparing this with a random value, the load generator creates active loops or idle periods of hive.test.load.interval length.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_LOAD_UTILIZATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_IN_TEZ_TEST", 109, "hive.in.tez.test", v, "internal use only, true when in testing tez", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_TEZ_TEST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_MAPJOIN_TESTING_NO_HASH_TABLE_LOAD", 110, "hive.mapjoin.testing.no.hash.table.load", v, "internal use only, true when in testing map join", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAPJOIN_TESTING_NO_HASH_TABLE_LOAD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ADDITIONAL_PARTIAL_MASKS_PATTERN", 111, "hive.qtest.additional.partial.mask.pattern", "", "internal use only, used only in qtests. Provide additional partial mask patterns for qtests as a \',\' separated list");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_PARTIAL_MASKS_PATTERN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ADDITIONAL_PARTIAL_MASKS_REPLACEMENT_TEXT", 112, "hive.qtest.additional.partial.mask.replacement.text", "", "internal use only, used only in qtests. Provide additional partial mask replacement text for qtests as a \',\' separated list");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_PARTIAL_MASKS_REPLACEMENT_TEXT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_IN_REPL_TEST_FILES_SORTED", 113, "hive.in.repl.test.files.sorted", v, "internal usage only, set to true if the file listing is required in sorted order during bootstrap load", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IN_REPL_TEST_FILES_SORTED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LOCALMODEAUTO", 114, "hive.exec.mode.local.auto", v, "Let Hive determine whether to run in local mode automatically");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEAUTO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(134217728L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LOCALMODEMAXBYTES", 115, "hive.exec.mode.local.auto.inputbytes.max", v, "When hive.exec.mode.local.auto is true, input bytes should be less than this for local mode.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEMAXBYTES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LOCALMODEMAXINPUTFILES", 116, "hive.exec.mode.local.auto.input.files.max", v, "When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LOCALMODEMAXINPUTFILES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DROP_IGNORES_NON_EXISTENT", 117, "hive.exec.drop.ignorenonexistent", v, "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a nonexistent table/view/function");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DROP_IGNORES_NON_EXISTENT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEIGNOREMAPJOINHINT", 118, "hive.ignore.mapjoin.hint", v, "Ignore the mapjoin hint");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEIGNOREMAPJOINHINT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_FILE_MAX_FOOTER", 119, "hive.file.max.footer", v, "maximum number of footer lines a user can define for a table file");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_FILE_MAX_FOOTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES", 120, "hive.resultset.use.unique.column.names", v, "Make column names unique in the result set by qualifying column names with table alias if needed.\nTable alias will be added to column names for queries of type \"select *\" or \nif query explicitly uses table alias \"select r.x..\".");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(64);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PROTO_EVENTS_QUEUE_CAPACITY", 121, "hive.hook.proto.queue.capacity", v, "Queue capacity for the proto events logging threads.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_QUEUE_CAPACITY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PROTO_EVENTS_BASE_PATH", 122, "hive.hook.proto.base-directory", "", "Base directory into which the proto event messages are written by HiveProtoLoggingHook.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_BASE_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(86400L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit,java.lang.Long,boolean,java.lang.Long,boolean)>(v, v, 1, v, 1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_PROTO_EVENTS_ROLLOVER_CHECK_INTERVAL", 123, "hive.hook.proto.rollover-interval", "600s", v, "Frequency at which the file rollover check is triggered.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_ROLLOVER_CHECK_INTERVAL> = v;
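/* The collapsed local v obscures the five-argument call above; reconstructed in plain
 * Java, the validator bounds the rollover-check interval to [0s, 86400s], inclusive:
 *
 *   Validator v = new Validator.TimeValidator(
 *       java.util.concurrent.TimeUnit.SECONDS, 0L, true, 86400L, true);
 */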
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit DAYS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_PROTO_EVENTS_CLEAN_FREQ", 124, "hive.hook.proto.events.clean.freq", "1d", v, "Frequency at which timer task runs to purge expired proto event files.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_CLEAN_FREQ> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit DAYS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_PROTO_EVENTS_TTL", 125, "hive.hook.proto.events.ttl", "7d", v, "Time-To-Live (TTL) of proto event files before cleanup.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_EVENTS_TTL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PROTO_FILE_PER_EVENT", 126, "hive.hook.proto.file.per.event", v, "Whether each proto event has to be written to a separate file. (Use this for filesystems that do not hflush immediately, like S3A)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PROTO_FILE_PER_EVENT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String findHadoopBinary()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HADOOPBIN", 127, "hadoop.bin.path", v, "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HADOOPBIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String findYarnBinary()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("YARNBIN", 128, "yarn.bin.path", v, "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars YARNBIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String findMapRedBinary()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("MAPREDBIN", 129, "mapred.bin.path", v, "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDBIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_FS_HAR_IMPL", 130, "fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem", "The implementation for accessing Hadoop Archives. Note that this won\'t be applicable to Hadoop versions less than 0.20");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_FS_HAR_IMPL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(256000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("MAPREDMAXSPLITSIZE", 131, "mapreduce.input.fileinputformat.split.maxsize", v, "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMAXSPLITSIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("MAPREDMINSPLITSIZE", 132, "mapreduce.input.fileinputformat.split.minsize", v, "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("MAPREDMINSPLITSIZEPERNODE", 133, "mapreduce.input.fileinputformat.split.minsize.per.node", v, "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZEPERNODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("MAPREDMINSPLITSIZEPERRACK", 134, "mapreduce.input.fileinputformat.split.minsize.per.rack", v, "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MAPREDMINSPLITSIZEPERRACK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HADOOPNUMREDUCERS", 135, "mapreduce.job.reduces", v, "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HADOOPNUMREDUCERS> = v;
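/* The (int) -1 default mirrors MapReduce semantics: a negative mapreduce.job.reduces
 * means "let Hive decide", bounded by hive.exec.reducers.max (MAXREDUCERS above).
 * Reading it back (sketch):
 *
 *   HiveConf conf = new HiveConf();
 *   int configuredReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); // -1 = auto
 */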
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[5];
v[0] = "DERBY";
v[1] = "ORACLE";
v[2] = "MYSQL";
v[3] = "MSSQL";
v[4] = "POSTGRES";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTOREDBTYPE", 136, "hive.metastore.db.type", "DERBY", v, "Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREDBTYPE> = v;
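/* Usage sketch: the StringSet validator admits exactly DERBY, ORACLE, MYSQL, MSSQL and
 * POSTGRES. For a MySQL-backed metastore one would set (illustrative):
 *
 *   HiveConf conf = new HiveConf();
 *   conf.setVar(HiveConf.ConfVars.METASTOREDBTYPE, "MYSQL");
 */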
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTOREWAREHOUSE", 137, "hive.metastore.warehouse.dir", "/user/hive/warehouse", "location of default database for the warehouse");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREWAREHOUSE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_WAREHOUSE_EXTERNAL", 138, "hive.metastore.warehouse.external.dir", null, "Default location for external tables created in the warehouse. If not set or null, then the normal warehouse location will be used as the default location.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_WAREHOUSE_EXTERNAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTOREURIS", 139, "hive.metastore.uris", "", "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREURIS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "SEQUENTIAL";
v[1] = "RANDOM";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORESELECTION", 140, "hive.metastore.uri.selection", "RANDOM", v, "Determines the selection mechanism used by metastore client to connect to remote metastore.  SEQUENTIAL implies that the first valid metastore from the URIs specified as part of hive.metastore.uris will be picked.  RANDOM implies that the metastore will be picked randomly");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESELECTION> = v;
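/* Usage sketch combining the two settings above (the URIs are illustrative):
 *
 *   HiveConf conf = new HiveConf();
 *   conf.setVar(HiveConf.ConfVars.METASTOREURIS,
 *       "thrift://ms1.example.com:9083,thrift://ms2.example.com:9083");
 *   conf.setVar(HiveConf.ConfVars.METASTORESELECTION, "SEQUENTIAL"); // or "RANDOM" (the default)
 */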
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CAPABILITY_CHECK", 141, "hive.metastore.client.capability.check", v, "Whether to check client capabilities for potentially breaking API usage.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CAPABILITY_CHECK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLIENT_CAPABILITIES", 142, "hive.metastore.client.capabilities", "", "Capabilities possessed by HiveServer");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CAPABILITIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLIENT_CACHE_ENABLED", 143, "hive.metastore.client.cache.enabled", v, "Whether to enable metastore client cache");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_CLIENT_CACHE_EXPIRY_TIME", 144, "hive.metastore.client.cache.expiry.time", "120s", v, "Expiry time for metastore client cache");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_EXPIRY_TIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(50);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLIENT_CACHE_INITIAL_CAPACITY", 145, "hive.metastore.client.cache.initial.capacity", v, "Initial capacity for metastore client cache");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_INITIAL_CAPACITY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(50);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLIENT_CACHE_MAX_CAPACITY", 146, "hive.metastore.client.cache.max.capacity", v, "Max capacity for metastore client cache");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_MAX_CAPACITY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLIENT_CACHE_STATS_ENABLED", 147, "hive.metastore.client.cache.stats.enabled", v, "Whether to enable metastore client cache stats");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CACHE_STATS_ENABLED> = v;
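The four client-cache settings above come back through HiveConf's typed getters. A short sketch that prints the defaults established here (values noted in comments):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class ClientCacheDefaults {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        boolean enabled = conf.getBoolVar(ConfVars.METASTORE_CLIENT_CACHE_ENABLED);        // false
        int initial = conf.getIntVar(ConfVars.METASTORE_CLIENT_CACHE_INITIAL_CAPACITY);    // 50
        int max = conf.getIntVar(ConfVars.METASTORE_CLIENT_CACHE_MAX_CAPACITY);            // 50
        boolean stats = conf.getBoolVar(ConfVars.METASTORE_CLIENT_CACHE_STATS_ENABLED);    // false
        System.out.printf("enabled=%b initial=%d max=%d stats=%b%n", enabled, initial, max, stats);
    }
}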
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_FASTPATH", 148, "hive.metastore.fastpath", v, "Used to avoid all of the proxies and object copies in the metastore. Note: if this is set, you MUST use a local metastore (hive.metastore.uris must be empty); otherwise undefined and most likely undesired behavior will result");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FASTPATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(15);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_FS_HANDLER_THREADS_COUNT", 149, "hive.metastore.fshandler.threads", v, "Number of threads to be allocated for metastore handler for fs operations.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FS_HANDLER_THREADS_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_HBASE_FILE_METADATA_THREADS", 150, "hive.metastore.hbase.file.metadata.threads", v, "Number of threads to use to read file metadata in background to cache it.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_HBASE_FILE_METADATA_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_URI_RESOLVER", 151, "hive.metastore.uri.resolver", "", "If set, the fully qualified class name of the resolver for Hive metastore URIs");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_URI_RESOLVER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(3);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORETHRIFTCONNECTIONRETRIES", 152, "hive.metastore.connect.retries", v, "Number of retries while opening a connection to metastore");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORETHRIFTCONNECTIONRETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORETHRIFTFAILURERETRIES", 153, "hive.metastore.failure.retries", v, "Number of retries upon failure of Thrift metastore calls");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORETHRIFTFAILURERETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(9083);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_SERVER_PORT", 154, "hive.metastore.port", v, "Hive metastore listener port");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SERVER_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_CLIENT_CONNECT_RETRY_DELAY", 155, "hive.metastore.client.connect.retry.delay", "1s", v, "Number of seconds for the client to wait between consecutive connection attempts");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_CONNECT_RETRY_DELAY> = v;
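How the retry count (hive.metastore.connect.retries, above) and this delay interact is easiest to see as a loop. This is a schematic illustration, not HiveMetaStoreClient's actual code; openConnection() is a placeholder:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class ConnectRetryLoop {
    static void openWithRetries(HiveConf conf) throws Exception {
        int retries = conf.getIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES);  // 3 by default
        long delayMs = conf.getTimeVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
                TimeUnit.MILLISECONDS);                                           // "1s" -> 1000
        for (int attempt = 1; ; attempt++) {
            try {
                openConnection();
                return;
            } catch (Exception e) {
                if (attempt >= retries) throw e;  // out of attempts, surface the failure
                Thread.sleep(delayMs);            // wait before the next attempt
            }
        }
    }

    static void openConnection() throws Exception { /* placeholder for the Thrift open */ }
}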
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_CLIENT_SOCKET_TIMEOUT", 156, "hive.metastore.client.socket.timeout", "600s", v, "MetaStore Client socket timeout in seconds");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_SOCKET_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_CLIENT_SOCKET_LIFETIME", 157, "hive.metastore.client.socket.lifetime", "0s", v, "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\nreconnects on the next MetaStore operation. A value of 0s means the connection\nhas an infinite lifetime.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_SOCKET_LIFETIME> = v;
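Both socket settings are guarded by a TimeValidator, so their values carry a unit suffix and are converted to whatever unit the caller asks for. A small sketch of the round trip:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class TimeVarExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // The default "600s" is converted on read to the requested unit.
        long timeoutMs = conf.getTimeVar(ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
                TimeUnit.MILLISECONDS);  // 600000
        // Writing through setTimeVar keeps the stored value unit-qualified.
        conf.setTimeVar(ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME, 30, TimeUnit.MINUTES);
        System.out.println(timeoutMs);
    }
}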
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTOREPWD", 158, "javax.jdo.option.ConnectionPassword", "mine", "Password to use against the metastore database.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREPWD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORECONNECTURLHOOK", 159, "hive.metastore.ds.connection.url.hook", "", "Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORECONNECTURLHOOK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTOREMULTITHREADED", 160, "javax.jdo.option.Multithreaded", v, "Set this to true if multiple threads access metastore through JDO concurrently.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREMULTITHREADED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORECONNECTURLKEY", 161, "javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=metastore_db;create=true", "JDBC connect string for a JDBC metastore.\nTo use SSL to encrypt/authenticate the connection, provide a database-specific SSL flag in the connection URL.\nFor example, jdbc:postgresql://myhost/db?ssl=true for a PostgreSQL database.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORECONNECTURLKEY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_DBACCESS_SSL_PROPS", 162, "hive.metastore.dbaccess.ssl.properties", "", "Comma-separated SSL properties for metastore to access database when JDO connection URL\nenables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DBACCESS_SSL_PROPS> = v;
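Putting the connection URL and the SSL properties together, a hypothetical SSL-enabled PostgreSQL-backed configuration could look like this (host, database name, and trust-store path are placeholders):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class MetastoreJdbcSsl {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // The database-specific SSL flag goes in the URL itself, as the description notes.
        conf.setVar(ConfVars.METASTORECONNECTURLKEY,
                "jdbc:postgresql://db.example.com/metastore?ssl=true");
        // Comma-separated javax.net.ssl properties for the metastore's own JDBC access.
        conf.setVar(ConfVars.METASTORE_DBACCESS_SSL_PROPS,
                "javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd");
    }
}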
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HMSHANDLERATTEMPTS", 163, "hive.hmshandler.retry.attempts", v, "The number of times to retry an HMSHandler call if there was a connection error.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERATTEMPTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HMSHANDLERINTERVAL", 164, "hive.hmshandler.retry.interval", "2000ms", v, "The time between HMSHandler retry attempts on failure.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERINTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HMSHANDLERFORCERELOADCONF", 165, "hive.hmshandler.force.reload.conf", v, "Whether to force reloading of the HMSHandler configuration (including\nthe connection URL) before the next metastore query that accesses the\ndatastore. Once reloaded, this value is reset to false. Used for\ntesting only.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HMSHANDLERFORCERELOADCONF> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(104857600L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORESERVERMAXMESSAGESIZE", 166, "hive.metastore.server.max.message.size", v, "Maximum message size in bytes a HMS will accept.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMAXMESSAGESIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(200);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORESERVERMINTHREADS", 167, "hive.metastore.server.min.threads", v, "Minimum number of worker threads in the Thrift server\'s pool.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMINTHREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORESERVERMAXTHREADS", 168, "hive.metastore.server.max.threads", v, "Maximum number of worker threads in the Thrift server\'s pool.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORESERVERMAXTHREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_TCP_KEEP_ALIVE", 169, "hive.metastore.server.tcp.keepalive", v, "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TCP_KEEP_ALIVE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_WM_DEFAULT_POOL_SIZE", 170, "hive.metastore.wm.default.pool.size", v, "The size of a default pool to create when creating an empty resource plan;\nif not positive, no default pool will be created.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_WM_DEFAULT_POOL_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_INT_ORIGINAL", 171, "hive.metastore.archive.intermediate.original", "_INTERMEDIATE_ORIGINAL", "Intermediate dir suffixes used for archiving. Not important what they\nare, as long as collisions are avoided");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_ORIGINAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_INT_ARCHIVED", 172, "hive.metastore.archive.intermediate.archived", "_INTERMEDIATE_ARCHIVED", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_ARCHIVED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_INT_EXTRACTED", 173, "hive.metastore.archive.intermediate.extracted", "_INTERMEDIATE_EXTRACTED", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INT_EXTRACTED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_KERBEROS_KEYTAB_FILE", 174, "hive.metastore.kerberos.keytab.file", "", "The path to the Kerberos Keytab file containing the metastore Thrift server\'s service principal.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_KERBEROS_KEYTAB_FILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_KERBEROS_PRINCIPAL", 175, "hive.metastore.kerberos.principal", "hive-metastore/_HOST@EXAMPLE.COM", "The service principal for the metastore Thrift server. \nThe special string _HOST will be replaced automatically with the correct host name.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_KERBEROS_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLIENT_KERBEROS_PRINCIPAL", 176, "hive.metastore.client.kerberos.principal", "", "The Kerberos principal associated with the HA cluster of hcat_servers.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_KERBEROS_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_USE_THRIFT_SASL", 177, "hive.metastore.sasl.enabled", v, "If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_SASL> = v;
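A hedged sketch of a SASL/Kerberos-secured client configuration, combining this flag with the keytab and principal properties defined above; the keytab path is a placeholder:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class SecureMetastoreConf {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL, true);
        // _HOST is expanded by Hive itself; the keytab location is hypothetical.
        conf.setVar(ConfVars.METASTORE_KERBEROS_PRINCIPAL, "hive-metastore/_HOST@EXAMPLE.COM");
        conf.setVar(ConfVars.METASTORE_KERBEROS_KEYTAB_FILE, "/etc/security/keytabs/hms.keytab");
    }
}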
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_USE_THRIFT_FRAMED_TRANSPORT", 178, "hive.metastore.thrift.framed.transport.enabled", v, "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_FRAMED_TRANSPORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_USE_THRIFT_COMPACT_PROTOCOL", 179, "hive.metastore.thrift.compact.protocol.enabled", v, "If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\nSetting it to true will break compatibility with older clients running TBinaryProtocol.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_THRIFT_COMPACT_PROTOCOL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_TOKEN_SIGNATURE", 180, "hive.metastore.token.signature", "", "The delegation token service name to match when selecting a token from the current user\'s tokens.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TOKEN_SIGNATURE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS", 181, "hive.cluster.delegation.token.store.class", "org.apache.hadoop.hive.thrift.MemoryTokenStore", "The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR", 182, "hive.cluster.delegation.token.store.zookeeper.connectString", "", "The ZooKeeper token store connect string. You can re-use the configuration value\nset in hive.zookeeper.quorum, by leaving this parameter unset.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE", 183, "hive.cluster.delegation.token.store.zookeeper.znode", "/hivedelegation", "The root path for token store data. Note that this is used by both HiveServer and\nMetaStore to store delegation Token. One directory gets created for each of them.\nThe final directory names would have the servername appended to it (HIVESERVER2,\nMETASTORE).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL", 184, "hive.cluster.delegation.token.store.zookeeper.acl", "", "ACL for token store entries. Comma separated list of ACL entries. For example:\nsasl:hive/host1@MY.DOMAIN:cdrwa,sasl:hive/host2@MY.DOMAIN:cdrwa\nDefaults to all permissions for the hiveserver2/metastore process user.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CACHE_PINOBJTYPES", 185, "hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order", "List of comma separated metastore object types that should be pinned in the cache");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_PINOBJTYPES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "DBCP";
v[1] = "HikariCP";
v[2] = "NONE";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_CONNECTION_POOLING_TYPE", 186, "datanucleus.connectionPoolingType", "HikariCP", v, "Specify connection pool library for datanucleus");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_POOLING_TYPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS", 187, "datanucleus.connectionPool.maxPoolSize", v, "Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\nrecommended to take into account the number of metastore instances and the number of HiveServer instances\nconfigured with embedded metastore. To get optimal performance, set config to meet the following condition\n(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n(2 * physical_core_count + hard_disk_count).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS> = v;
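The sizing condition in the description is easiest to check with numbers plugged in; the hardware figures below are assumptions for illustration, not recommendations:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class PoolSizing {
    public static void main(String[] args) {
        // Assume a database host with 16 physical cores and 8 disks:
        // budget = 2*16 + 8 = 40 connections. With 2 metastore instances and
        // no HiveServer2-embedded metastores, 2 * pool_size * 2 = 40, so
        // pool_size = 10 -- which happens to match the default set here.
        HiveConf conf = new HiveConf();
        conf.setIntVar(ConfVars.METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS, 10);
    }
}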
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_DATANUCLEUS_INIT_COL_INFO", 188, "datanucleus.rdbms.initializeColumnInfo", "NONE", "initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DATANUCLEUS_INIT_COL_INFO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_VALIDATE_TABLES", 189, "datanucleus.schema.validateTables", v, "Validates the existing schema against code. Turn this on if you want to verify the existing schema.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_TABLES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_VALIDATE_COLUMNS", 190, "datanucleus.schema.validateColumns", v, "Validates the existing schema against code. Turn this on if you want to verify the existing schema.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_COLUMNS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_VALIDATE_CONSTRAINTS", 191, "datanucleus.schema.validateConstraints", v, "Validates the existing schema against code. Turn this on if you want to verify the existing schema.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_VALIDATE_CONSTRAINTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_STORE_MANAGER_TYPE", 192, "datanucleus.storeManagerType", "rdbms", "metadata store type");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_STORE_MANAGER_TYPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AUTO_CREATE_ALL", 193, "datanucleus.schema.autoCreateAll", v, "Auto-creates the necessary schema on startup if one doesn\'t exist. Set this to false after creating it once. To enable auto-create, also set hive.metastore.schema.verification=false. Auto creation is not recommended for production use cases; run the schematool command instead.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTO_CREATE_ALL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_SCHEMA_VERIFICATION", 194, "hive.metastore.schema.verification", v, "Enforce metastore schema version consistency.\nTrue: Verify that the version information stored in the metastore is compatible with the one from the Hive jars. Also disable automatic\n      schema migration attempts. Users are required to manually migrate the schema after a Hive upgrade, which ensures\n      proper metastore schema migration. (Default)\nFalse: Warn if the version information stored in the metastore doesn\'t match the one from the Hive jars.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_VERIFICATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION", 195, "hive.metastore.schema.verification.record.version", v, "When true, the current MS version is recorded in the VERSION table. If this is disabled and verification is\nenabled, the MS will be unusable.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_SCHEMA_INFO_CLASS", 196, "hive.metastore.schema.info.class", "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo", "Fully qualified class name for the metastore schema information class\nwhich is used by schematool to fetch the schema information.\nThis class should implement the IMetaStoreSchemaInfo interface.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_SCHEMA_INFO_CLASS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_TRANSACTION_ISOLATION", 197, "datanucleus.transactionIsolation", "read-committed", "Default transaction isolation level for identity generation.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRANSACTION_ISOLATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CACHE_LEVEL2", 198, "datanucleus.cache.level2", v, "Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_LEVEL2> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CACHE_LEVEL2_TYPE", 199, "datanucleus.cache.level.type", "none", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CACHE_LEVEL2_TYPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_IDENTIFIER_FACTORY", 200, "datanucleus.identifierFactory", "datanucleus1", "Name of the identifier factory to use when generating table/column names etc. \n\'datanucleus1\' is used for backward compatibility with DataNucleus v1");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_IDENTIFIER_FACTORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_USE_LEGACY_VALUE_STRATEGY", 201, "datanucleus.rdbms.useLegacyNativeValueStrategy", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_USE_LEGACY_VALUE_STRATEGY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK", 202, "datanucleus.plugin.pluginRegistryBundleCheck", "LOG", "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(300);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_BATCH_RETRIEVE_MAX", 203, "hive.metastore.batch.retrieve.max", v, "Maximum number of objects (tables/partitions) that can be retrieved from the metastore in one batch.\nThe higher the number, the fewer round trips are needed to the Hive metastore server,\nbut it may also cause a higher memory requirement at the client side.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_BATCH_RETRIEVE_MAX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_BATCH_RETRIEVE_OBJECTS_MAX", 204, "hive.metastore.batch.retrieve.table.partition.max", v, "Maximum number of objects that metastore internally retrieves in one batch.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_BATCH_RETRIEVE_OBJECTS_MAX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_INIT_HOOKS", 205, "hive.metastore.init.hooks", "", "A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization.\nAn init hook is specified as the name of a Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INIT_HOOKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_PRE_EVENT_LISTENERS", 206, "hive.metastore.pre.event.listeners", "", "List of comma separated listeners for metastore events.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PRE_EVENT_LISTENERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_EVENT_LISTENERS", 207, "hive.metastore.event.listeners", "", "A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_LISTENERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE", 208, "hive.write.notification.max.batch.size", v, "Max number of write notification logs sent in a batch.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_TRANSACTIONAL_EVENT_LISTENERS", 209, "hive.metastore.transactional.event.listeners", "", "A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRANSACTIONAL_EVENT_LISTENERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES", 210, "hive.notification.sequence.lock.max.retries", v, "Number of retries required to acquire a lock when getting the next notification sequential ID for entries in the NOTIFICATION_LOG table.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(10L);
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES>;
v = virtualinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String name()>();
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Sleep interval between retries to acquire a notification lock as described part of property \u0001");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL", 211, "hive.notification.sequence.lock.retry.sleep.interval", v, v, v);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL> = v;
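The dynamicinvoke above is just javac's lowering of ordinary String concatenation through StringConcatFactory.makeConcatWithConstants; the 0x01 character in the recipe marks where the runtime argument is spliced in. An equivalent source-level expression, runnable as a demo:

import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class ConcatRecipeDemo {
    public static void main(String[] args) {
        // What the compiled recipe reconstructs at runtime.
        String description =
                "Sleep interval between retries to acquire a notification lock as described part of property "
                + ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name();
        System.out.println(description);
    }
}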
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_EVENT_DB_LISTENER_TTL", 212, "hive.metastore.event.db.listener.timetolive", "86400s", v, "Time after which events will be removed from the database listener queue when repl.cm.enabled\nis set to false. When repl.cm.enabled is set to true, repl.event.db.listener.timetolive is used instead.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_DB_LISTENER_TTL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_EVENT_DB_NOTIFICATION_API_AUTH", 213, "hive.metastore.event.db.notification.api.auth", v, "Should the metastore do authorization against database notification related APIs such as get_next_notification.\nIf set to true, then only the superusers in proxy settings have the permission.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_DB_NOTIFICATION_API_AUTH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS", 214, "hive.metastore.authorization.storage.checks", v, "Should the metastore do authorization checks against the underlying storage (usually hdfs) \nfor operations like drop-partition (disallow the drop-partition if the user in\nquestion doesn\'t have permissions to delete the corresponding directory\non the storage).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK", 215, "hive.metastore.authorization.storage.check.externaltable.drop", v, "Should StorageBasedAuthorization check permission of the storage before dropping an external table.\nStorageBasedAuthorization already does this check for managed tables. For external tables, however,\nanyone who has read permission of the directory could drop the external table, which is surprising.\nEarlier releases defaulted this flag to false for backward compatibility; here it defaults to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_EVENT_CLEAN_FREQ", 216, "hive.metastore.event.clean.freq", "0s", v, "Frequency at which timer task runs to purge expired events in metastore.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_CLEAN_FREQ> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_EVENT_EXPIRY_DURATION", 217, "hive.metastore.event.expiry.duration", "0s", v, "Duration after which events expire from events table");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_EXPIRY_DURATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_EVENT_MESSAGE_FACTORY", 218, "hive.metastore.event.message.factory", "org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder", "Factory class for making encoding and decoding messages in the events generated.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EVENT_MESSAGE_FACTORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_EXECUTE_SET_UGI", 219, "hive.metastore.execute.setugi", v, "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using\nthe client\'s reported user and group permissions. Note that this property must be set on\nboth the client and server sides. Further note that it is best effort.\nIf the client sets it to true and the server sets it to false, the client setting will be ignored.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EXECUTE_SET_UGI> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_PARTITION_NAME_WHITELIST_PATTERN", 220, "hive.metastore.partition.name.whitelist.pattern", "", "Partition names will be checked against this regex pattern and rejected if not matched.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PARTITION_NAME_WHITELIST_PATTERN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_INTEGER_JDO_PUSHDOWN", 221, "hive.metastore.integral.jdo.pushdown", v, "Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\nimproves metastore perf for integral columns, especially if there\'s a large number of partitions.\nHowever, it doesn\'t work correctly with integral values that are not normalized (e.g. have\nleading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\nis also irrelevant.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_INTEGER_JDO_PUSHDOWN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_TRY_DIRECT_SQL", 222, "hive.metastore.try.direct.sql", v, "Whether the Hive metastore should try to use direct SQL queries instead of the\nDataNucleus for certain read paths. This can improve metastore performance when\nfetching many partitions or column statistics by orders of magnitude; however, it\nis not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\nthe metastore will fall back to the DataNucleus, so it\'s safe even if SQL doesn\'t\nwork for all queries on your datastore. If all SQL queries fail (for example, your\nmetastore is backed by MongoDB), you might want to disable this to save the\ntry-and-fall-back cost.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRY_DIRECT_SQL> = v;
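A schematic of the try-then-fall-back behaviour this description promises. It is not ObjectStore's actual code, only the shape of it:

import java.util.function.Supplier;

public class DirectSqlFallback {
    // Try the hand-written SQL path first; if it throws, retry via the ORM so
    // the call still succeeds (at the cost of the try-and-fall-back overhead).
    static <T> T get(boolean tryDirectSql, Supplier<T> directSql, Supplier<T> jdo) {
        if (tryDirectSql) {
            try {
                return directSql.get();
            } catch (RuntimeException e) {
                // fall through to the DataNucleus path
            }
        }
        return jdo.get();
    }
}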
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE", 223, "hive.metastore.direct.sql.batch.size", v, "Batch size for partition and other object retrieval from the underlying DB in direct\nSQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\nthat necessitate this. For DBs that can handle the queries, this isn\'t necessary and\nmay impede performance. -1 means no batching, 0 means automatic batching.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_TRY_DIRECT_SQL_DDL", 224, "hive.metastore.try.direct.sql.ddl", v, "Same as hive.metastore.try.direct.sql, for read statements within a transaction that\nmodifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\nselect query has incorrect syntax or something similar inside a transaction, the\nentire transaction will fail and fall-back to DataNucleus will not be possible. You\nshould disable the usage of direct SQL inside transactions if that happens in your case.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TRY_DIRECT_SQL_DDL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH", 225, "hive.direct.sql.max.query.length", v, "The maximum size of a query string (in KB).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE", 226, "hive.direct.sql.max.elements.in.clause", v, "The maximum number of values in an IN clause. Once exceeded, it will be broken into\nmultiple OR-separated IN clauses.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE> = v;
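A minimal sketch, not Hive's implementation, of the splitting rule this property names: once an IN list exceeds the maximum, it is rewritten as OR-separated IN clauses (values are assumed to be already quoted and escaped):

import java.util.ArrayList;
import java.util.List;

public class InClauseSplitter {
    static String buildFilter(String column, List<String> values, int maxPerClause) {
        List<String> clauses = new ArrayList<>();
        for (int i = 0; i < values.size(); i += maxPerClause) {
            List<String> chunk = values.subList(i, Math.min(i + maxPerClause, values.size()));
            clauses.add(column + " IN (" + String.join(", ", chunk) + ")");
        }
        return "(" + String.join(" OR ", clauses) + ")";
    }

    public static void main(String[] args) {
        System.out.println(buildFilter("part_id", List.of("1", "2", "3", "4", "5"), 2));
        // -> (part_id IN (1, 2) OR part_id IN (3, 4) OR part_id IN (5))
    }
}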
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE", 227, "hive.direct.sql.max.elements.values.clause", v, "The maximum number of values in a VALUES clause for INSERT statement.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS", 228, "hive.metastore.orm.retrieveMapNullsAsEmptyStrings", v, "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must either be pruned or converted to empty strings. Some backing DBs such as Oracle persist empty strings as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, pruning is the correct behaviour.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES", 229, "hive.metastore.disallow.incompatible.col.type.changes", v, "If true (the default), ALTER TABLE operations which change the type of a\ncolumn (say STRING) to an incompatible type (say MAP) are disallowed.\nRCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\ndatatypes can be converted from string to any type. The map is also serialized as\na string, which can be read as a string as well. However, with any binary\nserialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\nwhen subsequently trying to access old partitions.\n\nPrimitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\nnot blocked.\n\nSee HIVE-4409 for more details.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(-1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_LIMIT_PARTITION_REQUEST", 230, "hive.metastore.limit.partition.request", v, "This limits the number of partitions that can be requested from the metastore for a given table.\nThe default value \"-1\" means no limit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_LIMIT_PARTITION_REQUEST> = v;
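// Hedged sketch of honoring hive.metastore.limit.partition.request in client code,
// where -1 means unlimited per the description above. The check method and its
// error message are illustrative assumptions, not Hive's own enforcement path.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

class PartitionLimitSketch {
    static void check(HiveConf conf, int requestedPartitions) {
        int limit = conf.getIntVar(ConfVars.METASTORE_LIMIT_PARTITION_REQUEST);
        if (limit != -1 && requestedPartitions > limit) { // -1 disables the limit
            throw new IllegalStateException("query would request " + requestedPartitions
                + " partitions, above hive.metastore.limit.partition.request=" + limit);
        }
    }
}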
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("NEWTABLEDEFAULTPARA", 231, "hive.table.parameters.default", "", "Default property values for newly created tables");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NEWTABLEDEFAULTPARA> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DDL_CTL_PARAMETERS_WHITELIST", 232, "hive.ddl.createtablelike.properties.whitelist", "", "Table Properties to copy over when executing a Create Table Like.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DDL_CTL_PARAMETERS_WHITELIST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_RAW_STORE_IMPL", 233, "hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore", "Name of the class that implements the org.apache.hadoop.hive.metastore.rawstore interface. \nThis class is used to store and retrieve raw metadata objects such as tables and databases.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_RAW_STORE_IMPL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_TXN_STORE_IMPL", 234, "hive.metastore.txn.store.impl", "org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler", "Name of the class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. This class is used to store and retrieve transactions and locks.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_TXN_STORE_IMPL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CONNECTION_DRIVER", 235, "javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver", "Driver class name for a JDBC metastore");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_DRIVER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_MANAGER_FACTORY_CLASS", 236, "javax.jdo.PersistenceManagerFactoryClass", "org.datanucleus.api.jdo.JDOPersistenceManagerFactory", "Class implementing the JDO persistence layer.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_MANAGER_FACTORY_CLASS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_EXPRESSION_PROXY_CLASS", 237, "hive.metastore.expression.proxy", "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_EXPRESSION_PROXY_CLASS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_DETACH_ALL_ON_COMMIT", 238, "javax.jdo.option.DetachAllOnCommit", v, "Detaches all objects from session so that they can be used after transaction is committed");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_DETACH_ALL_ON_COMMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_NON_TRANSACTIONAL_READ", 239, "javax.jdo.option.NonTransactionalRead", v, "Reads outside of transactions");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_NON_TRANSACTIONAL_READ> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CONNECTION_USER_NAME", 240, "javax.jdo.option.ConnectionUserName", "APP", "Username to use against metastore database");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CONNECTION_USER_NAME> = v;
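// Hedged sketch: pointing the metastore at a different JDBC backend via the driver
// and user-name vars above. The Postgres driver class and the "hive" user are
// illustrative assumptions; in practice these values usually live in hive-site.xml.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

class MetastoreJdbcSketch {
    static HiveConf postgresBacked() {
        HiveConf conf = new HiveConf();
        conf.setVar(ConfVars.METASTORE_CONNECTION_DRIVER, "org.postgresql.Driver"); // assumption
        conf.setVar(ConfVars.METASTORE_CONNECTION_USER_NAME, "hive");               // assumption
        return conf;
    }
}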
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_END_FUNCTION_LISTENERS", 241, "hive.metastore.end.function.listeners", "", "List of comma separated listeners for the end of metastore functions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_END_FUNCTION_LISTENERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_PART_INHERIT_TBL_PROPS", 242, "hive.metastore.partition.inherit.table.properties", "", "List of comma separated keys occurring in table properties which will be inherited by newly created partitions. \n* implies that all the keys will be inherited.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_PART_INHERIT_TBL_PROPS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_FILTER_HOOK", 243, "hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl", "Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager is set to an instance of HiveAuthorizerFactory, then this value is ignored.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_FILTER_HOOK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("FIRE_EVENTS_FOR_DML", 244, "hive.metastore.dml.events", v, "If true, the metastore will be asked to fire events for DML operations");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars FIRE_EVENTS_FOR_DML> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS", 245, "hive.metastore.client.drop.partitions.using.expressions", v, "Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, or drops partitions iteratively");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_ENABLED", 246, "hive.metastore.aggregate.stats.cache.enabled", v, "Whether aggregate stats caching is enabled or not.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_SIZE", 247, "hive.metastore.aggregate.stats.cache.size", v, "Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS", 248, "hive.metastore.aggregate.stats.cache.max.partitions", v, "Maximum number of partitions that are aggregated per cache node.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.01F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_FPP", 249, "hive.metastore.aggregate.stats.cache.fpp", v, "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_FPP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.01F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE", 250, "hive.metastore.aggregate.stats.cache.max.variance", v, "Maximum tolerable variance in number of partitions between a cached node and our request (default 1%).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_TTL", 251, "hive.metastore.aggregate.stats.cache.ttl", "600s", v, "Number of seconds for a cached node to be active in the cache before it becomes stale.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_TTL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT", 252, "hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms", v, "Number of milliseconds a writer will wait to acquire the writelock before giving up.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT", 253, "hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms", v, "Number of milliseconds a reader will wait to acquire the readlock before giving up.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT> = v;
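// Hedged sketch: the TTL and lock-wait vars above carry TimeValidators, so they are
// read with HiveConf.getTimeVar, which converts to whatever unit the caller asks for.
// Only the getTimeVar calls are real API; the dump method is illustrative.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

class AggrStatsCacheTimesSketch {
    static void dump(HiveConf conf) {
        long ttlS = conf.getTimeVar(ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL, TimeUnit.SECONDS);                   // default "600s" -> 600
        long wrMs = conf.getTimeVar(ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT, TimeUnit.MILLISECONDS);  // default "5000ms" -> 5000
        long rdMs = conf.getTimeVar(ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT, TimeUnit.MILLISECONDS);  // default "1000ms" -> 1000
        System.out.printf("ttl=%ds writerWait=%dms readerWait=%dms%n", ttlS, wrMs, rdMs);
    }
}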
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.9F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL", 254, "hive.metastore.aggregate.stats.cache.max.full", v, "Maximum cache full % after which the cache cleaner thread kicks in.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.8F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL", 255, "hive.metastore.aggregate.stats.cache.clean.until", v, "The cleaner thread cleans until cache reaches this % full size.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METASTORE_METRICS", 256, "hive.metastore.metrics.enabled", v, "Enable metrics on the metastore.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTORE_METRICS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_USE_SSL", 257, "hive.metastore.use.SSL", v, "Set this to true for using SSL encryption in HMS server.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_USE_SSL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_SSL_KEYSTORE_PATH", 258, "hive.metastore.keystore.path", "", "Metastore SSL certificate keystore location.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_KEYSTORE_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_SSL_KEYSTORE_PASSWORD", 259, "hive.metastore.keystore.password", "", "Metastore SSL certificate keystore password.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_KEYSTORE_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_SSL_TRUSTSTORE_PATH", 260, "hive.metastore.truststore.path", "", "Metastore SSL certificate truststore location.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_TRUSTSTORE_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD", 261, "hive.metastore.truststore.password", "", "Metastore SSL certificate truststore password.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD> = v;
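// Hedged sketch: turning on TLS for the HMS with the five SSL vars above. The
// keystore/truststore paths and the "changeit" passwords are placeholder assumptions,
// not recommended values; real deployments should use a credential provider.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

class MetastoreSslSketch {
    static void enableSsl(HiveConf conf) {
        conf.setBoolVar(ConfVars.HIVE_METASTORE_USE_SSL, true);
        conf.setVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH, "/etc/hive/hms-keystore.jks");     // placeholder
        conf.setVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD, "changeit");                   // placeholder
        conf.setVar(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH, "/etc/hive/hms-truststore.jks"); // placeholder
        conf.setVar(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD, "changeit");                 // placeholder
    }
}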
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("METADATA_EXPORT_LOCATION", 262, "hive.metadata.export.location", "", "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \nit is the location to which the metadata will be exported. The default is an empty string, which results in the \nmetadata being exported to the current user\'s home directory on HDFS.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METADATA_EXPORT_LOCATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("MOVE_EXPORTED_METADATA_TO_TRASH", 263, "hive.metadata.move.exported.metadata.to.trash", v, "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \nthis setting determines if the metadata that is exported will subsequently be moved to the user\'s trash directory \nalongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MOVE_EXPORTED_METADATA_TO_TRASH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("CLIIGNOREERRORS", 264, "hive.cli.errors.ignore", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIIGNOREERRORS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("CLIPRINTCURRENTDB", 265, "hive.cli.print.current.db", v, "Whether to include the current database in the Hive prompt.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIPRINTCURRENTDB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("CLIPROMPT", 266, "hive.cli.prompt", "hive", "Command line prompt configuration value. Other hiveconf variables can be used in this configuration value. \nVariable substitution will only be invoked at Hive CLI startup.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIPROMPT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_FS_HANDLER_CLS", 267, "hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_FS_HANDLER_CLS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESESSIONID", 268, "hive.session.id", "", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESESSIONID> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESESSIONSILENT", 269, "hive.session.silent", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESESSIONSILENT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOCAL_TIME_ZONE", 270, "hive.local.time.zone", "LOCAL", "Sets the time-zone for displaying and interpreting time stamps. If this property value is set to\nLOCAL, is not specified, or is not a correct time-zone, the system default time-zone will be\nused instead. Time-zone IDs can be specified as region-based zone IDs (based on IANA time-zone data),\nabbreviated zone IDs, or offset IDs.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCAL_TIME_ZONE> = v;
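// Hedged sketch of the fallback behaviour hive.local.time.zone describes, expressed
// with java.time for illustration. resolveHiveTimeZone is hypothetical, not the code
// Hive actually runs; it only mirrors the rules in the description above.
import java.time.DateTimeException;
import java.time.ZoneId;

class LocalTimeZoneSketch {
    static ZoneId resolveHiveTimeZone(String value) {
        // "LOCAL" or unset -> system default time-zone.
        if (value == null || value.isEmpty() || "LOCAL".equalsIgnoreCase(value)) {
            return ZoneId.systemDefault();
        }
        try {
            return ZoneId.of(value, ZoneId.SHORT_IDS); // region-based, abbreviated, or offset IDs
        } catch (DateTimeException e) {
            return ZoneId.systemDefault();             // incorrect IDs also fall back
        }
    }
}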
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SESSION_HISTORY_ENABLED", 271, "hive.session.history.enabled", v, "Whether to log Hive query, query plan, runtime statistics etc.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_HISTORY_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEQUERYSTRING", 272, "hive.query.string", "", "Query being executed (there might be multiple per session)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYSTRING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEQUERYID", 273, "hive.query.id", "", "ID for the query being executed (there might be multiple per session)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYID> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEQUERYTAG", 274, "hive.query.tag", null, "Tag for the queries in the session. Users can kill the queries with the tag from another session. Currently there is no tag duplication check, so users need to make sure their tags are unique. Also, \'kill query\' needs to be issued to all HiveServer instances to properly kill the queries.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYTAG> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(50);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEJOBNAMELENGTH", 275, "hive.jobname.length", v, "max jobname length");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOBNAMELENGTH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEJAR", 276, "hive.jar.path", "", "The location of hive_cli.jar that is used when submitting jobs in a separate jvm.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJAR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEAUXJARS", 277, "hive.aux.jars.path", "", "The location of the plugin jars that contain implementations of user defined functions and serdes.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEAUXJARS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVERELOADABLEJARS", 278, "hive.reloadable.aux.jars.path", "", "The locations of the plugin jars, which can be comma-separated folders or jars. Jars can be renewed\nby executing the reload command, and these jars can be used as auxiliary classes, e.g. for creating a UDF or SerDe.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVERELOADABLEJARS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEADDEDFILES", 279, "hive.added.files.path", "", "This is an internal parameter.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDFILES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEADDEDJARS", 280, "hive.added.jars.path", "", "This is an internal parameter.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDJARS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEADDEDARCHIVES", 281, "hive.added.archives.path", "", "This is an internal parameter.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDEDARCHIVES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEADDFILESUSEHDFSLOCATION", 282, "hive.resource.use.hdfs.location", v, "Reference HDFS-based files/jars directly instead of copying them to the session-based HDFS scratch directory, to make the distributed cache more useful.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEADDFILESUSEHDFSLOCATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_CURRENT_DATABASE", 283, "hive.current.database", "", "Database name used by current session. Internal usage only.", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CURRENT_DATABASE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVES_AUTO_PROGRESS_TIMEOUT", 284, "hive.auto.progress.timeout", "0s", v, "How long to run autoprogressor for the script/UDTF operators.\nSet to 0 for forever.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVES_AUTO_PROGRESS_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPTAUTOPROGRESS", 285, "hive.script.auto.progress", v, "Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \nto avoid the task getting killed because of inactivity.  Hive sends progress information when the script is \noutputting to stderr.  This option removes the need to periodically produce stderr messages, \nbut users should be cautious because this may prevent infinite loops in the scripts from being killed by TaskTracker.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTAUTOPROGRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPTIDENVVAR", 286, "hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID", "Name of the environment variable that holds the unique script operator ID in the user\'s \ntransform function (the custom mapper/reducer that the user has specified in the query)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTIDENVVAR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPTTRUNCATEENV", 287, "hive.script.operator.truncate.env", v, "Truncate each environment variable for the external script in the script operator to 20KB (to fit system limits)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTTRUNCATEENV> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPT_ENV_BLACKLIST", 288, "hive.script.operator.env.blacklist", "hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist,hive.repl.current.table.write.id", "Comma separated list of keys from the configuration file not to convert to environment variables when invoking the script operator");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPT_ENV_BLACKLIST> = v;
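// Hedged sketch of how a comma-separated blacklist like the one above can be applied
// before exporting conf keys into a script operator's environment. pruneEnv is a
// hypothetical helper for illustration, not Hive's actual implementation.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class ScriptEnvBlacklistSketch {
    static void pruneEnv(Map<String, String> env, String blacklist) {
        Set<String> banned = new HashSet<>(Arrays.asList(blacklist.split(",")));
        env.keySet().removeIf(banned::contains); // drop every blacklisted key in place
    }
}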
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT", 289, "hive.strict.checks.orderby.no.limit", v, "Enabling strict large query checks disallows the following:\n  Orderby without limit.\nNote that this check currently does not consider data size, only the query pattern.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STRICT_CHECKS_NO_PARTITION_FILTER", 290, "hive.strict.checks.no.partition.filter", v, "Enabling strict large query checks disallows the following:\n  No partition being picked up for a query against partitioned table.\nNote that this check currently does not consider data size, only the query pattern.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_NO_PARTITION_FILTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STRICT_CHECKS_TYPE_SAFETY", 291, "hive.strict.checks.type.safety", v, "Enabling strict type safety checks disallows the following:\n  Comparing bigints and strings/(var)chars.\n  Comparing bigints and doubles.\n  Comparing decimals and strings/(var)chars.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_TYPE_SAFETY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STRICT_CHECKS_CARTESIAN", 292, "hive.strict.checks.cartesian.product", v, "Enabling strict Cartesian join checks disallows the following:\n  Cartesian product (cross join).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_CARTESIAN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STRICT_CHECKS_BUCKETING", 293, "hive.strict.checks.bucketing", v, "Enabling strict bucketing checks disallows the following:\n  Load into bucketed tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_CHECKS_BUCKETING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STRICT_TIMESTAMP_CONVERSION", 294, "hive.strict.timestamp.conversion", v, "Restricts unsafe numeric to timestamp conversions");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_TIMESTAMP_CONVERSION> = v;
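// Hedged sketch: tightening the strict checks defined above in one place. The looser
// defaults in this dump are deliberate, so treat this as an opt-in hardening example,
// not a recommendation; hardened is an illustrative helper, not a Hive API.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

class StrictChecksSketch {
    static void hardened(HiveConf conf) {
        conf.setBoolVar(ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT, true);    // ORDER BY must carry a LIMIT
        conf.setBoolVar(ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER, true); // partitioned scans need a partition filter
        conf.setBoolVar(ConfVars.HIVE_STRICT_CHECKS_CARTESIAN, true);           // forbid cross joins
        // Type safety, bucketing, and timestamp-conversion checks already default to true above.
    }
}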
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOAD_DATA_OWNER", 295, "hive.load.data.owner", "", "Set the owner of files loaded using load data in managed tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DATA_OWNER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPREDMODE", 296, "hive.mapred.mode", null, "Deprecated; use hive.strict.checks.* settings instead.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPREDMODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEALIAS", 297, "hive.alias", "", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEALIAS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPSIDEAGGREGATE", 298, "hive.map.aggr", v, "Whether to use map-side aggregation in Hive Group By queries");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPSIDEAGGREGATE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEGROUPBYSKEW", 299, "hive.groupby.skewindata", v, "Whether there is skew in data to optimize group by queries");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEGROUPBYSKEW> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS", 300, "hive.join.shortcut.unmatched.rows", v, "Enables shortcutting the processing of known filtered rows in merge joins. Internal use only; may affect correctness.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEJOINEMITINTERVAL", 301, "hive.join.emit.interval", v, "How many rows in the right-most join operand Hive should buffer before emitting the join result.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOINEMITINTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(25000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEJOINCACHESIZE", 302, "hive.join.cache.size", v, "How many rows in the joining tables (except the streaming table) should be cached in memory.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEJOINCACHESIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PUSH_RESIDUAL_INNER", 303, "hive.join.inner.residual", v, "Whether to push non-equi filter predicates within inner joins. This can improve efficiency in the evaluation of certain joins, since we will not be emitting rows which are thrown away by a Filter operator straight away. However, currently vectorization does not support them, thus enabling it is only recommended when vectorization is disabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PUSH_RESIDUAL_INNER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PTF_RANGECACHE_SIZE", 304, "hive.ptf.rangecache.size", v, "Size of the cache used on the reducer side that stores boundaries of ranges within a PTF partition. Used if a query specifies a RANGE-type window including an ORDER BY clause. Set this to 0 to disable this cache.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_RANGECACHE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PTF_VALUECACHE_SIZE", 305, "hive.ptf.valuecache.size", v, "Size of the cache used on the reducer side that stores calculated values for ranges within a PTF partition. Set this to 0 to disable this cache.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_VALUECACHE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PTF_VALUECACHE_COLLECT_STATISTICS", 306, "hive.ptf.valuecache.collect.statistics", v, "Whether to collect cache statistics in PTFValueCache. On extremely hot codepaths, this can be expensive, so it\'s disabled by default. It\'s only for development/debugging purposes; the execution engine doesn\'t take advantage of statistics stored in the cache.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PTF_VALUECACHE_COLLECT_STATISTICS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_ENABLED", 307, "hive.cbo.enable", v, "Flag to control enabling Cost Based Optimizations using Calcite framework.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[4];
v[0] = "NEVER";
v[1] = "CONSERVATIVE";
v[2] = "ALWAYS";
v[3] = "TEST";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(boolean,java.lang.String[])>(1, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_CBO_FALLBACK_STRATEGY", 308, "hive.cbo.fallback.strategy", "CONSERVATIVE", v, "The strategy defines when Hive falls back to the legacy optimizer when CBO fails: NEVER, never use the legacy optimizer (all CBO errors are fatal); ALWAYS, always use the legacy optimizer (CBO errors are not fatal); CONSERVATIVE, use the legacy optimizer only when the CBO error is not related to subqueries and views; TEST, specific behavior only for tests, do not use in production.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_FALLBACK_STRATEGY> = v;
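// Hedged note: the StringSet validator built just above restricts the fallback
// strategy to the four listed values (the leading `1` flag appears to make the match
// case-insensitive), so values outside the set should be rejected wherever the var is
// validated, e.g. via SET in a session. Setting the var itself is plain HiveConf API:
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

class CboFallbackSketch {
    static void failFast(HiveConf conf) {
        conf.setVar(ConfVars.HIVE_CBO_FALLBACK_STRATEGY, "NEVER"); // all CBO errors become fatal
    }
}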
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_CNF_NODES_LIMIT", 309, "hive.cbo.cnf.maxnodes", v, "When converting to conjunctive normal form (CNF), fail if the expression exceeds this threshold; the threshold is expressed in terms of number of nodes (leaves and interior nodes). -1 to not set up a threshold.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_CNF_NODES_LIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_RETPATH_HIVEOP", 310, "hive.cbo.returnpath.hiveop", v, "Flag to control calcite plan to hive operator conversion");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_RETPATH_HIVEOP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_EXTENDED_COST_MODEL", 311, "hive.cbo.costmodel.extended", v, "Flag to control enabling the extended cost model based on CPU, IO and cardinality. Otherwise, the cost model is based on cardinality.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_EXTENDED_COST_MODEL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_COST_MODEL_CPU", 312, "hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_CPU> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_COST_MODEL_NET", 313, "hive.cbo.costmodel.network", "150.0", "Default cost of transferring a byte over the network; expressed as multiple of CPU cost");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_NET> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_COST_MODEL_LFS_WRITE", 314, "hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS; expressed as multiple of NETWORK cost");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_LFS_WRITE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_COST_MODEL_LFS_READ", 315, "hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS; expressed as multiple of NETWORK cost");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_LFS_READ> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_COST_MODEL_HDFS_WRITE", 316, "hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS; expressed as multiple of Local FS write cost");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_HDFS_WRITE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_COST_MODEL_HDFS_READ", 317, "hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS; expressed as multiple of Local FS read cost");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_COST_MODEL_HDFS_READ> = v;
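// Worked example: the cost-model knobs above chain multiplicatively (network is a
// multiple of CPU, local FS of network, HDFS of local FS), so the default effective
// per-byte costs come out as below. Plain arithmetic for illustration, no Hive API.
class CboCostModelArithmetic {
    public static void main(String[] args) {
        double cpu       = 0.000001;         // hive.cbo.costmodel.cpu: cost of one comparison
        double net       = 150.0 * cpu;      // network byte  = 150 x CPU           = 1.5e-4
        double lfsRead   = 4.0   * net;      // local FS read = 4 x network         = 6.0e-4
        double lfsWrite  = 4.0   * net;      // local FS write = 4 x network        = 6.0e-4
        double hdfsRead  = 1.5   * lfsRead;  // HDFS read  = 1.5 x local FS read    = 9.0e-4
        double hdfsWrite = 10.0  * lfsWrite; // HDFS write = 10 x local FS write    = 6.0e-3
        System.out.printf("net=%.1e lfsRead=%.1e hdfsRead=%.1e hdfsWrite=%.1e%n",
            net, lfsRead, hdfsRead, hdfsWrite);
    }
}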
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_RULE_EXCLUSION_REGEX", 318, "hive.cbo.rule.exclusion.regex", "", "Regex over rule descriptions to exclude them from planning. The intended usage is to allow disabling rules for problematic queries; it is *not* a performance tuning property. The property is experimental and can be changed or removed without any notice.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_RULE_EXCLUSION_REGEX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_SHOW_WARNINGS", 319, "hive.cbo.show.warnings", v, "Toggle display of CBO warnings like missing column stats");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_SHOW_WARNINGS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS", 320, "hive.cbo.stats.correlated.multi.key.joins", v, "When CBO estimates output rows for a join involving multiple columns, the default behavior assumes the columns are independent. Setting this flag to true will cause the estimator to assume the columns are correlated.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CARDINALITY_PRESERVING_JOIN_OPTIMIZATION_FACTOR", 321, "hive.cardinality.preserving.join.optimization.factor", v, "Original plan cost multiplier for rewriting when a query has tables joined multiple times on primary/unique keys and projects the majority of columns from these tables. This optimization trims fields at the root of the tree and then joins back the affected tables at the top of the tree to get the rest of the columns. Set this to 0.0 to disable this optimization or increase it for more aggressive optimization.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CARDINALITY_PRESERVING_JOIN_OPTIMIZATION_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("AGGR_JOIN_TRANSPOSE", 322, "hive.transpose.aggr.join", v, "push aggregates through join");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars AGGR_JOIN_TRANSPOSE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("AGGR_JOIN_TRANSPOSE_UNIQUE", 323, "hive.transpose.aggr.join.unique", v, "push aggregates through join(s) in case data is regrouped on a previously unique column");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars AGGR_JOIN_TRANSPOSE_UNIQUE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SEMIJOIN_CONVERSION", 324, "hive.optimize.semijoin.conversion", v, "convert group by followed by inner equi join into semijoin");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SEMIJOIN_CONVERSION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COLUMN_ALIGNMENT", 325, "hive.order.columnalignment", v, "Flag to control whether we want to try to align columns in operators such as Aggregate or Join so that we try to reduce the number of shuffling stages");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COLUMN_ALIGNMENT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING", 326, "hive.materializedview.rewriting", v, "Whether to try to rewrite queries using the materialized views enabled for rewriting");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING_SQL", 327, "hive.materializedview.rewriting.sql", v, "Whether to try to rewrite queries using the materialized views enabled for rewriting by comparing the sql query syntax tree with the materialized views query syntax tree");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING_SQL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING_SUBQUERY_SQL", 328, "hive.materializedview.rewriting.sql.subquery", v, "Whether to try to rewrite sub-queries using the materialized views enabled for rewriting by comparing the sql sub-query syntax tree with the materialized views query syntax tree");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING_SUBQUERY_SQL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "heuristic";
v[1] = "costbased";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY", 329, "hive.materializedview.rewriting.strategy", "heuristic", v, "The strategy that should be used to cost and select the materialized view rewriting. \n  heuristic: Always try to select the plan using the materialized view if rewriting produced one, choosing the plan with lower cost among possible plans containing a materialized view\n  costbased: Fully cost-based strategy, always use plan with lower cost, independently of whether it uses a materialized view or not");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY> = v;
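// The newarray/element-store sequence above is how a varargs call to the StringSet
// validator is lowered. A runnable sketch of the validator's behavior (assumes
// hive-common on the classpath; per the Validator contract, validate() is expected
// to return null for an accepted value and an error message otherwise):
import org.apache.hadoop.hive.conf.Validator;

public class RewritingStrategyValidatorDemo {
    public static void main(String[] args) {
        Validator v = new Validator.StringSet("heuristic", "costbased");
        System.out.println(v.validate("costbased"));   // expected: null (accepted)
        System.out.println(v.validate("exhaustive"));  // expected: an error message
    }
}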
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MINUTES>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW", 330, "hive.materializedview.rewriting.time.window", "0min", v, "Time window (bare numbers are interpreted as minutes) after which outdated materialized views become invalid for automatic query rewriting.\nFor instance, if more time than the value assigned to the property has passed since the materialized view was created or rebuilt, and one of its source tables has changed since, the materialized view will not be considered for rewriting. Default value 0 means that the materialized view must not be outdated at all to be used automatically in query rewriting. Value -1 means to skip this check.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW> = v;
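// Time-typed ConfVars such as this one take a number with a unit suffix; the
// TimeUnit passed to the TimeValidator above (MINUTES) is the unit assumed for
// bare numbers, which is why the default is written "0min". A minimal sketch of
// overriding and reading the window (assumes hive-common on the classpath):
import org.apache.hadoop.hive.conf.HiveConf;
import java.util.concurrent.TimeUnit;

public class RewritingTimeWindowDemo {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.set("hive.materializedview.rewriting.time.window", "10min");
        // getTimeVar converts the stored value to the requested unit:
        long seconds = conf.getTimeVar(
                HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW,
                TimeUnit.SECONDS);
        System.out.println(seconds); // 600
    }
}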
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL", 331, "hive.materializedview.rewriting.incremental", v, "Whether to try to execute incremental rewritings based on outdated materializations and\ncurrent content of tables. Setting this to true effectively amounts to enabling incremental\nrebuild for the materializations too.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL", 332, "hive.materializedview.rebuild.incremental", v, "Whether to try to execute incremental rebuild for the materialized views. Incremental rebuild\ntries to modify the original materialization contents to reflect the latest changes to the\nmaterialized view source tables, instead of rebuilding the contents fully. Incremental rebuild\nis based on the materialized view algebraic incremental rewriting.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.1F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL_FACTOR", 333, "hive.materializedview.rebuild.incremental.factor", v, "The estimated cost of the resulting plan for incremental maintenance of materialization\nwith aggregations will be multiplied by this value. Reducing the value can be useful to\nfavour incremental rebuild over full rebuild.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[6];
v[0] = "none";
v[1] = "TextFile";
v[2] = "SequenceFile";
v[3] = "RCfile";
v[4] = "ORC";
v[5] = "parquet";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_MATERIALIZED_VIEW_FILE_FORMAT", 334, "hive.materializedview.fileformat", "ORC", v, "Default file format for CREATE MATERIALIZED VIEW statement");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_FILE_FORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MATERIALIZED_VIEW_SERDE", 335, "hive.materializedview.serde", "org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MATERIALIZED_VIEW_SERDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ENABLE_JDBC_PUSHDOWN", 336, "hive.jdbc.pushdown.enable", v, "Flag to control enabling pushdown of operators into JDBC connection and subsequent SQL generation\nusing Calcite");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENABLE_JDBC_PUSHDOWN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ENABLE_JDBC_SAFE_PUSHDOWN", 337, "hive.jdbc.pushdown.safe.enable", v, "Flag to control enabling pushdown of operators using Calcite that prevent splitting results\nretrieval in the JDBC storage handler");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENABLE_JDBC_SAFE_PUSHDOWN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPJOINBUCKETCACHESIZE", 338, "hive.mapjoin.bucket.cache.size", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINBUCKETCACHESIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPJOINUSEOPTIMIZEDTABLE", 339, "hive.mapjoin.optimized.hashtable", v, "Whether Hive should use memory-optimized hash table for MapJoin.\nOnly works on Tez because memory-optimized hashtable cannot be serialized.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINUSEOPTIMIZEDTABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.5F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT", 340, "hive.mapjoin.optimized.hashtable.probe.percent", v, "Probing space percentage of the optimized hashtable");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPJOINPARALELHASHTABLETHREADS", 341, "hive.mapjoin.hashtable.load.threads", v, "Number of threads used to load records from a broadcast edge into the hash table used for map join");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINPARALELHASHTABLETHREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEUSEHYBRIDGRACEHASHJOIN", 342, "hive.mapjoin.hybridgrace.hashtable", v, "Whether to use hybrid grace hash join as the join method for mapjoin. Tez only.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEHYBRIDGRACEHASHJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1024);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ", 343, "hive.mapjoin.hybridgrace.memcheckfrequency", v, "For hybrid grace hash join, how often (how many rows apart) we check if memory is full. This number should be power of 2.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ> = v;
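// The doc string above asks for a power of two; that turns the per-row
// "is it time to check memory?" test into a cheap bit mask rather than a modulo.
// A self-contained illustration of that idiom (illustrative only, not Hive's code):
public class MemCheckFrequencyDemo {
    public static void main(String[] args) {
        int freq = 1024; // hive.mapjoin.hybridgrace.memcheckfrequency
        if ((freq & (freq - 1)) != 0) {
            throw new IllegalArgumentException("frequency must be a power of 2");
        }
        for (long row = 1; row <= 4096; row++) {
            if ((row & (freq - 1)) == 0) { // true on every 1024th row
                System.out.println("memory check at row " + row);
            }
        }
    }
}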
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(524288);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHYBRIDGRACEHASHJOINMINWBSIZE", 344, "hive.mapjoin.hybridgrace.minwbsize", v, "For hybrid grace hash join, the minimum write buffer size used by optimized hashtable. Default is 512 KB.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMINWBSIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(16);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS", 345, "hive.mapjoin.hybridgrace.minnumpartitions", v, "For hybrid grace hash join, the minimum number of partitions to create.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(8388608);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHASHTABLEWBSIZE", 346, "hive.mapjoin.optimized.hashtable.wbsize", v, "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\nstore data. This is one buffer size. HT may be slightly faster if this is larger, but for small\njoins unnecessary memory will be allocated and then trimmed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEWBSIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHYBRIDGRACEHASHJOINBLOOMFILTER", 347, "hive.mapjoin.hybridgrace.bloomfilter", v, "Whether to use BloomFilter in hybrid grace hash join to minimize unnecessary spilling.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHYBRIDGRACEHASHJOINBLOOMFILTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPJOINFULLOUTER", 348, "hive.mapjoin.full.outer", v, "Whether to use MapJoin for FULL OUTER JOINs.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINFULLOUTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "enable";
v[2] = "disable";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean)>("HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE", 349, "hive.test.mapjoin.full.outer.override", "none", v, "internal use only, used to override the hive.mapjoin.full.outer\nsetting. Using enable will force it on and disable will force it off.\nThe default, none, does nothing", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESMBJOINCACHEROWS", 350, "hive.smbjoin.cache.rows", v, "How many rows with the same key value should be cached in memory per smb joined table.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESMBJOINCACHEROWS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEGROUPBYMAPINTERVAL", 351, "hive.groupby.mapaggr.checkinterval", v, "Number of rows after which the size of the grouping keys/aggregation classes is checked");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEGROUPBYMAPINTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.5F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPAGGRHASHMEMORY", 352, "hive.map.aggr.hash.percentmemory", v, "Portion of total memory to be used by map-side group aggregation hash table");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMEMORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.3F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY", 353, "hive.mapjoin.followby.map.aggr.hash.percentmemory", v, "Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.9F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPAGGRMEMORYTHRESHOLD", 354, "hive.map.aggr.hash.force.flush.memory.threshold", v, "The max memory to be used by map-side group aggregation hash table.\nIf the memory usage is higher than this number, the data is force-flushed");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRMEMORYTHRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.99F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPAGGRHASHMINREDUCTION", 355, "hive.map.aggr.hash.min.reduction", v, "Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \nSet to 1 to make sure hash aggregation is never turned off.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTION> = v;
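// A self-contained sketch of the shut-off rule described above (an illustration
// of the documented behavior, not Hive's operator code): hash aggregation is
// abandoned once the distinct-key ratio exceeds the configured minimum reduction.
public class MinReductionDemo {
    public static void main(String[] args) {
        float minReduction = 0.99f; // hive.map.aggr.hash.min.reduction
        long inputRows = 100_000;
        long hashTableEntries = 99_500; // distinct grouping keys seen so far
        double ratio = (double) hashTableEntries / inputRows;
        boolean keepHashAggregation = ratio <= minReduction;
        System.out.println("ratio=" + ratio + ", keep hash aggregation: "
                + keepHashAggregation); // false: 0.995 > 0.99
    }
}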
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.4F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND", 356, "hive.map.aggr.hash.min.reduction.lower.bound", v, "Lower bound of the hash aggregate reduction filter. See also: hive.map.aggr.hash.min.reduction");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST", 357, "hive.map.aggr.hash.min.reduction.stats", v, "Whether the value for hive.map.aggr.hash.min.reduction should be set statically using stats estimates. \nIf this is enabled, the default value for hive.map.aggr.hash.min.reduction is only used as an upper-bound\nfor the value set in the map-side group by operators.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMULTIGROUPBYSINGLEREDUCER", 358, "hive.multigroupby.singlereducer", v, "Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has \ncommon group by keys, it will be optimized to generate single M/R job.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMULTIGROUPBYSINGLEREDUCER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MAP_GROUPBY_SORT", 359, "hive.map.groupby.sorted", v, "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \nthe group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\nis that it limits the number of mappers to the number of files.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAP_GROUPBY_SORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DEFAULT_NULLS_LAST", 360, "hive.default.nulls.last", v, "Whether to set NULLS LAST as the default null ordering for ASC order and NULLS FIRST for DESC order.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DEFAULT_NULLS_LAST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_GROUPBY_POSITION_ALIAS", 361, "hive.groupby.position.alias", v, "Whether to enable using Column Position Alias in Group By");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_POSITION_ALIAS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORDERBY_POSITION_ALIAS", 362, "hive.orderby.position.alias", v, "Whether to enable using Column Position Alias in Order By");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORDERBY_POSITION_ALIAS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORDERBY_POSITION_ALIAS>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_POSITION_ALIAS>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,java.lang.String)>(v, v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Whether to enable using Column Position Alias in Group By or Order By (deprecated).\nUse \u0001 or \u0001 instead");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_GROUPBY_ORDERBY_POSITION_ALIAS", 363, "hive.groupby.orderby.position.alias", v, v);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_ORDERBY_POSITION_ALIAS> = v;
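// The dynamicinvoke above is the StringConcatFactory-based concatenation javac
// emits on modern JDKs: each \u0001 in the recipe string is a placeholder for one
// dynamic argument (here, the two varname fields read just before). A plausible
// source-level equivalent (a reconstruction, not the literal original):
public class PositionAliasDescriptionDemo {
    public static void main(String[] args) {
        String orderByVar = "hive.orderby.position.alias";
        String groupByVar = "hive.groupby.position.alias";
        String description =
                "Whether to enable using Column Position Alias in Group By or Order By (deprecated).\n"
                + "Use " + orderByVar + " or " + groupByVar + " instead";
        System.out.println(description);
    }
}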
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(30);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_NEW_JOB_GROUPING_SET_CARDINALITY", 364, "hive.new.job.grouping.set.cardinality", v, "Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\nFor a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\nThis can lead to explosion across map-reduce boundary if the cardinality of T is very high,\nand map-side aggregation does not do a very good job. \n\nThis parameter decides if Hive should add an additional map-reduce job. If the grouping set\ncardinality (4 in the example above) is more than this value, a new MR job is added under the\nassumption that the original group by will reduce the data size.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NEW_JOB_GROUPING_SET_CARDINALITY> = v;
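// A worked example of the cardinality the doc string above compares against its
// threshold (illustrative arithmetic, not Hive's planner code):
public class GroupingSetCardinalityDemo {
    public static void main(String[] args) {
        int groupByColumns = 3;
        int rollupSets = groupByColumns + 1; // (a,b,c),(a,b),(a),() -> 4
        int cubeSets = 1 << groupByColumns;  // every subset of {a,b,c} -> 8
        int threshold = 30;                  // hive.new.job.grouping.set.cardinality
        System.out.println("extra MR job for ROLLUP: " + (rollupSets > threshold)); // false
        System.out.println("extra MR job for CUBE:   " + (cubeSets > threshold));   // false
    }
}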
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_GROUPBY_LIMIT_EXTRASTEP", 365, "hive.groupby.limit.extrastep", v, "This parameter decides if Hive should \ncreate a new MR job for sorting the final output");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_GROUPBY_LIMIT_EXTRASTEP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_EXEC_COPYFILE_MAXNUMFILES", 366, "hive.exec.copyfile.maxnumfiles", v, "Maximum number of files Hive uses to do sequential HDFS copies between directories. Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_COPYFILE_MAXNUMFILES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(33554432L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_EXEC_COPYFILE_MAXSIZE", 367, "hive.exec.copyfile.maxsize", v, "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories. Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_COPYFILE_MAXSIZE> = v;
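// A sketch of the copy-strategy decision the two ConfVars above describe: fall
// back to distcp when either the file count or the total size exceeds its
// threshold (an illustration of the documented policy, not Hive's code):
public class CopyStrategyDemo {
    static final long MAX_NUM_FILES = 1L;     // hive.exec.copyfile.maxnumfiles
    static final long MAX_SIZE = 33_554_432L; // hive.exec.copyfile.maxsize (32 MB)

    static boolean useDistCp(long numFiles, long totalBytes) {
        return numFiles > MAX_NUM_FILES || totalBytes > MAX_SIZE;
    }

    public static void main(String[] args) {
        System.out.println(useDistCp(1, 10_000_000)); // false: sequential HDFS copy
        System.out.println(useDistCp(8, 10_000_000)); // true: distcp
        System.out.println(useDistCp(1, 64_000_000)); // true: distcp
    }
}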
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEUDTFAUTOPROGRESS", 368, "hive.udtf.auto.progress", v, "Whether Hive should automatically send progress information to TaskTracker \nwhen using UDTFs to prevent the task getting killed because of inactivity. Users should be cautious \nbecause this may prevent TaskTracker from killing tasks with infinite loops.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUDTFAUTOPROGRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[5];
v[0] = "TextFile";
v[1] = "SequenceFile";
v[2] = "RCfile";
v[3] = "ORC";
v[4] = "parquet";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVEDEFAULTFILEFORMAT", 369, "hive.default.fileformat", "TextFile", v, "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTFILEFORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[6];
v[0] = "none";
v[1] = "TextFile";
v[2] = "SequenceFile";
v[3] = "RCfile";
v[4] = "ORC";
v[5] = "parquet";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVEDEFAULTMANAGEDFILEFORMAT", 370, "hive.default.fileformat.managed", "none", v, "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \ncreated with format specified by hive.default.fileformat. Leaving this as none will result in using hive.default.fileformat \nfor all tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTMANAGEDFILEFORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DEFAULT_STORAGE_HANDLER", 371, "hive.default.storage.handler.class", "", "Default storage handler class for CREATE TABLE statements. If this is set to a valid class, a \'CREATE TABLE ... STORED AS ... LOCATION ...\' command will be equivalent to \'CREATE TABLE ... STORED BY [default.storage.handler.class] LOCATION ...\'. Any STORED AS clauses will be ignored, given that STORED BY and STORED AS are incompatible within the same command. Users can explicitly override the default class by issuing \'CREATE TABLE ... STORED BY [overriding.storage.handler.class] ...\'");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DEFAULT_STORAGE_HANDLER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = <org.apache.hadoop.hive.conf.HiveConf$ResultFileFormat: org.apache.hadoop.hive.conf.HiveConf$ResultFileFormat SEQUENCEFILE>;
v = virtualinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ResultFileFormat: java.lang.String toString()>();
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = staticinvoke <org.apache.hadoop.hive.conf.HiveConf$ResultFileFormat: java.util.EnumSet getValidSet()>();
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.util.EnumSet)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVEQUERYRESULTFILEFORMAT", 372, "hive.query.result.fileformat", v, v, "Default file format for storing result of the query.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYRESULTFILEFORMAT> = v;
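// Unlike the literal defaults elsewhere, this default and its validator are
// derived from the nested ResultFileFormat enum (see the staticinvoke /
// virtualinvoke pair above). A runnable peek at both (assumes hive-common on
// the classpath):
import org.apache.hadoop.hive.conf.HiveConf;

public class ResultFileFormatDemo {
    public static void main(String[] args) {
        // The enum constant whose string form becomes the default value:
        System.out.println(HiveConf.ResultFileFormat.SEQUENCEFILE.toString());
        // The set the StringSet validator is built from:
        System.out.println(HiveConf.ResultFileFormat.getValidSet());
    }
}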
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECHECKFILEFORMAT", 373, "hive.fileformat.check", v, "Whether to check file format or not when loading data files");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECHECKFILEFORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEDEFAULTRCFILESERDE", 374, "hive.default.rcfile.serde", "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe", "The default SerDe Hive will use for the RCFile format");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTRCFILESERDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEDEFAULTSERDE", 375, "hive.default.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", "The default SerDe Hive will use for storage formats that do not specify a SerDe.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDEFAULTSERDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SERDESUSINGMETASTOREFORSCHEMA", 376, "hive.serdes.using.metastore.for.schema", "org.apache.hadoop.hive.ql.io.orc.OrcSerde,org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe,org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe,org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe,org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe,org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe,org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe,org.apache.hadoop.hive.serde2.OpenCSVSerde", "SerDes retrieving schema from metastore. This is an internal parameter.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SERDESUSINGMETASTOREFORSCHEMA> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SERDESUSINGMETASTOREFORSCHEMA>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("A backward compatibility setting for external metastore users that do not handle \n\u0001 correctly. This may be removed at any time.");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES", 377, "hive.legacy.schema.for.all.serdes", v, v);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = <java.io.File: java.lang.String separator>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("${system:java.io.tmpdir}\u0001${system:user.name}");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHISTORYFILELOC", 378, "hive.querylog.location", v, "Location of Hive run time structured log file");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHISTORYFILELOC> = v;
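// The dynamicinvoke above splices File.separator between the two literal
// fragments; the \u0001 in its recipe marks the insertion point. A source-level
// equivalent of the default value (a reconstruction):
import java.io.File;

public class QuerylogLocationDemo {
    public static void main(String[] args) {
        String defaultLocation =
                "${system:java.io.tmpdir}" + File.separator + "${system:user.name}";
        // Hive expands the ${system:...} substitutions at configuration time.
        System.out.println(defaultLocation);
    }
}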
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOG_INCREMENTAL_PLAN_PROGRESS", 379, "hive.querylog.enable.plan.progress", v, "Whether to log the plan\'s progress every time a job\'s progress is checked.\nThese logs are written to the location specified by hive.querylog.location");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_INCREMENTAL_PLAN_PROGRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL", 380, "hive.querylog.plan.progress.interval", "60000ms", v, "The interval to wait between logging the plan\'s progress.\nIf there is a whole number percentage change in the progress of the mappers or the reducers,\nthe progress is logged regardless of this value.\nThe actual interval will be the ceiling of (this value divided by the value of\nhive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval.\nI.e. if it does not divide evenly by the value of hive.exec.counters.pull.interval it will be\nlogged less frequently than specified.\nThis only has an effect if hive.querylog.enable.plan.progress is set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL> = v;
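// A worked example of the rounding rule in the doc string above: the effective
// logging interval is the configured value rounded up to a multiple of
// hive.exec.counters.pull.interval (illustrative arithmetic; the pull-interval
// figure is hypothetical):
public class PlanProgressIntervalDemo {
    public static void main(String[] args) {
        long configuredMs = 60_000; // hive.querylog.plan.progress.interval
        long pullMs = 17_000;       // hypothetical hive.exec.counters.pull.interval
        long effectiveMs = ((configuredMs + pullMs - 1) / pullMs) * pullMs;
        System.out.println(effectiveMs); // 68000 -> logged less often than configured
    }
}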
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPTSERDE", 381, "hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", "The default SerDe for transmitting input data to and reading output data from the user scripts. ");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTSERDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPTRECORDREADER", 382, "hive.script.recordreader", "org.apache.hadoop.hive.ql.exec.TextRecordReader", "The default record reader for reading data from the user scripts. ");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTRECORDREADER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPTRECORDWRITER", 383, "hive.script.recordwriter", "org.apache.hadoop.hive.ql.exec.TextRecordWriter", "The default record writer for writing data to the user scripts. ");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTRECORDWRITER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPTESCAPE", 384, "hive.transform.escape.input", v, "This adds an option to escape special chars (newlines, carriage returns and\ntabs) when they are passed to the user script. This is useful if the Hive tables\ncan contain data that contains special characters.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTESCAPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEBINARYRECORDMAX", 385, "hive.binary.record.max.length", v, "Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \nThe last record before the end of stream can have less than hive.binary.record.max.length bytes");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEBINARYRECORDMAX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHADOOPMAXMEM", 386, "hive.mapred.local.mem", v, "mapper/reducer memory in local mode");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHADOOPMAXMEM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(25000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESMALLTABLESFILESIZE", 387, "hive.mapjoin.smalltable.filesize", v, "The threshold for the input file size of the small tables; if the file size is smaller \nthan this threshold, it will try to convert the common join into map join");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESMALLTABLESFILESIZE> = v;
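// A sketch of the common-join-to-map-join conversion test described above
// (an illustration of the documented threshold, not Hive's planner code):
public class MapJoinConversionDemo {
    static boolean convertToMapJoin(long smallTableBytes) {
        long threshold = 25_000_000L; // hive.mapjoin.smalltable.filesize
        return smallTableBytes < threshold;
    }

    public static void main(String[] args) {
        System.out.println(convertToMapJoin(1_000_000));  // true: broadcast the small side
        System.out.println(convertToMapJoin(50_000_000)); // false: keep the common join
    }
}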
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SCHEMA_EVOLUTION", 388, "hive.exec.schema.evolution", v, "Use schema evolution to convert self-describing file format\'s data to the schema desired by the reader.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEMA_EVOLUTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_FORCE_POSITIONAL_SCHEMA_EVOLUTION", 389, "orc.force.positional.evolution", v, "Whether to use column position based schema evolution or not (as opposed to column name based evolution)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_FORCE_POSITIONAL_SCHEMA_EVOLUTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_TRANSACTIONAL_TABLE_SCAN", 390, "hive.transactional.table.scan", v, "internal usage only -- do transaction (ACID or insert-only) table scan.", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRANSACTIONAL_TABLE_SCAN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY", 391, "hive.transactional.events.mem", v, "Vectorized ACID readers can often load all the delete events from all the delete deltas\ninto memory to optimize for performance. To prevent out-of-memory errors, this is a rough heuristic\nthat limits the total number of delete events that can be loaded into memory at once.\nRoughly it has been set to 10 million delete events per bucket (~160 MB).\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("FILTER_DELETE_EVENTS", 392, "hive.txn.filter.delete.events", v, "If true, VectorizedOrcAcidRowBatchReader will compute min/max ROW__ID for the split and only load delete events in that range.\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars FILTER_DELETE_EVENTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESAMPLERANDOMNUM", 393, "hive.sample.seednumber", v, "A number used for percentage sampling. By changing this number, the user changes the subsets of data sampled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLERANDOMNUM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODE", 394, "hive.test.mode", v, "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVEEXIMTESTMODE", 395, "hive.exim.test.mode", v, "The subset of test mode that only enables custom path handling for ExIm.", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXIMTESTMODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODEPREFIX", 396, "hive.test.mode.prefix", "test_", "In test mode, specifies prefixes for the output table", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEPREFIX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(32);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODESAMPLEFREQ", 397, "hive.test.mode.samplefreq", v, "In test mode, specifies the sampling frequency for a table which is not bucketed.\nFor example, the following query:\n  INSERT OVERWRITE TABLE dest SELECT col from src\nwould be converted to\n  INSERT OVERWRITE TABLE test_dest\n  SELECT col from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODESAMPLEFREQ> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODENOSAMPLE", 398, "hive.test.mode.nosamplelist", "", "In test mode, specifies comma-separated table names to which sampling is not applied", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODENOSAMPLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODEDUMMYSTATAGGR", 399, "hive.test.dummystats.aggregator", "", "internal variable for test", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEDUMMYSTATAGGR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODEDUMMYSTATPUB", 400, "hive.test.dummystats.publisher", "", "internal variable for test", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEDUMMYSTATPUB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTCURRENTTIMESTAMP", 401, "hive.test.currenttimestamp", null, "current timestamp for test", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTCURRENTTIMESTAMP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODEROLLBACKTXN", 402, "hive.test.rollbacktxn", v, "For testing only.  Will mark every ACID transaction aborted", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEROLLBACKTXN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODEFAILCOMPACTION", 403, "hive.test.fail.compaction", v, "For testing only.  Will cause CompactorMR to fail.", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILCOMPACTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODEFAILLOADDYNAMICPARTITION", 404, "hive.test.fail.load.dynamic.partition", v, "For testing only.  Will cause loadDynamicPartition to fail.", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILLOADDYNAMICPARTITION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODEFAILHEARTBEATER", 405, "hive.test.fail.heartbeater", v, "For testing only.  Will cause Heartbeater to fail.", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEFAILHEARTBEATER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("TESTMODE_BUCKET_CODEC_VERSION", 406, "hive.test.bucketcodec.version", v, "For testing only.  Will make ACID subsystem write RecordIdentifier.bucketId in specified\nformat", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TESTMODE_BUCKET_CODEC_VERSION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_EXTEND_BUCKET_ID_RANGE", 407, "hive.extend.bucketid.range", v, "Dynamically allocate some bits from statement id when bucket id overflows. This allows having more than 4096 buckets.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXTEND_BUCKET_ID_RANGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVETESTMODEACIDKEYIDXSKIP", 408, "hive.test.acid.key.index.skip", v, "For testing only. OrcRecordUpdater will skip generation of the hive.acid.key.index", 0);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETESTMODEACIDKEYIDXSKIP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMERGEMAPFILES", 409, "hive.merge.mapfiles", v, "Merge small files at the end of a map-only job");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMERGEMAPREDFILES", 410, "hive.merge.mapredfiles", v, "Merge small files at the end of a map-reduce job");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPREDFILES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMERGETEZFILES", 411, "hive.merge.tezfiles", v, "Merge small files at the end of a Tez DAG");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGETEZFILES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(256000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMERGEMAPFILESSIZE", 412, "hive.merge.size.per.task", v, "Size of merged files at the end of the job");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILESSIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(16000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMERGEMAPFILESAVGSIZE", 413, "hive.merge.smallfiles.avgsize", v, "When the average output file size of a job is less than this number, Hive will start an additional \nmap-reduce job to merge the output files into bigger files. This is only done for map-only jobs \nif hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEMAPFILESAVGSIZE> = v;
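// Hedged sketch (not Hive's actual operator code): how the three merge settings above
// interact, per the hive.merge.smallfiles.avgsize description. All names and sample
// values below are illustrative assumptions only.
public final class MergeDecisionSketch {
    // True when an extra file-merge job should be scheduled: the average output file
    // size fell below the avgsize threshold and merging is enabled for this job type.
    static boolean shouldMerge(long totalOutputBytes, int fileCount,
                               boolean mergeEnabled, long avgSizeThreshold) {
        if (!mergeEnabled || fileCount == 0) {
            return false;
        }
        long avg = totalOutputBytes / fileCount;
        return avg < avgSizeThreshold;
    }

    public static void main(String[] args) {
        // Defaults from the entries above: 16000000 (smallfiles.avgsize); the merge job
        // would then target files of roughly 256000000 bytes (size.per.task).
        boolean merge = shouldMerge(40_000_000L, 10, true, 16_000_000L);
        System.out.println("schedule merge job: " + merge); // avg 4MB < 16MB -> true
    }
}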
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMERGERCFILEBLOCKLEVEL", 414, "hive.merge.rcfile.block.level", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGERCFILEBLOCKLEVEL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMERGEORCFILESTRIPELEVEL", 415, "hive.merge.orcfile.stripe.level", v, "When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\nwhile writing a table with ORC file format, enabling this config will do stripe-level\nfast merge for small ORC files. Note that enabling this config will not honor the\npadding tolerance config (hive.exec.orc.block.padding.tolerance).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMERGEORCFILESTRIPELEVEL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_CODEC_POOL", 416, "hive.use.orc.codec.pool", v, "Whether to use codec pool in ORC. Disable if there are bugs with codec reuse.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CODEC_POOL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEUSEEXPLICITRCFILEHEADER", 417, "hive.exec.rcfile.use.explicit.header", v, "If this is set, the header for RCFiles will simply be RCF. If this is not\nset, the header will be the one borrowed from sequence files, e.g. SEQ- followed\nby the input and output RCFile formats.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEEXPLICITRCFILEHEADER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEUSERCFILESYNCCACHE", 418, "hive.exec.rcfile.use.sync.cache", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSERCFILESYNCCACHE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2147483647);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_RCFILE_RECORD_INTERVAL", 419, "hive.io.rcfile.record.interval", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_RECORD_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_RCFILE_COLUMN_NUMBER_CONF", 420, "hive.io.rcfile.column.number.conf", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_COLUMN_NUMBER_CONF> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_RCFILE_TOLERATE_CORRUPTIONS", 421, "hive.io.rcfile.tolerate.corruptions", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_TOLERATE_CORRUPTIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4194304);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_RCFILE_RECORD_BUFFER_SIZE", 422, "hive.io.rcfile.record.buffer.size", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RCFILE_RECORD_BUFFER_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.5F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("PARQUET_MEMORY_POOL_RATIO", 423, "parquet.memory.pool.ratio", v, "Maximum fraction of heap that can be used by Parquet file writers in one task.\nIt is for avoiding OutOfMemory errors in tasks. Works with Parquet 1.6.0 and above.\nThis config parameter is defined in Parquet, which is why it does not start with \'hive.\'.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars PARQUET_MEMORY_POOL_RATIO> = v;
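// Hedged sketch: what a heap fraction like parquet.memory.pool.ratio (0.5 default
// above) works out to in bytes for the current JVM. Purely illustrative arithmetic,
// not the Parquet writer's actual accounting.
public final class ParquetPoolBudgetSketch {
    public static void main(String[] args) {
        double ratio = 0.5; // default from the entry above
        long maxHeap = Runtime.getRuntime().maxMemory(); // task heap ceiling
        long writerBudget = (long) (maxHeap * ratio);
        System.out.println("max heap bytes: " + maxHeap);
        System.out.println("parquet writer budget bytes: " + writerBudget);
    }
}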
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION", 424, "hive.parquet.timestamp.skip.conversion", v, "Current Hive implementation of parquet stores timestamps in UTC; this flag allows skipping of the conversion on reading parquet files from other tools");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PARQUET_DATE_PROLEPTIC_GREGORIAN", 425, "hive.parquet.date.proleptic.gregorian", v, "Should we write date using the proleptic Gregorian calendar instead of the hybrid Julian Gregorian?\nHybrid is the default.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_DATE_PROLEPTIC_GREGORIAN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PARQUET_DATE_PROLEPTIC_GREGORIAN_DEFAULT", 426, "hive.parquet.date.proleptic.gregorian.default", v, "This value controls whether date type in Parquet files was written using the hybrid or proleptic\ncalendar. Hybrid is the default.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_DATE_PROLEPTIC_GREGORIAN_DEFAULT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PARQUET_TIMESTAMP_LEGACY_CONVERSION_ENABLED", 427, "hive.parquet.timestamp.legacy.conversion.enabled", v, "Whether to use former Java date/time APIs to convert between timezones when reading timestamps from Parquet files. The property has no effect when the file contains explicit metadata about the conversion used to write the data; in this case reading conversion is chosen based on the metadata.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_LEGACY_CONVERSION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PARQUET_TIMESTAMP_WRITE_LEGACY_CONVERSION_ENABLED", 428, "hive.parquet.timestamp.write.legacy.conversion.enabled", v, "Whether to use former Java date/time APIs to convert between timezones when writing timestamps in Parquet files. Once data are written to the file, the effect is permanent (also reflected in the metadata). Changing the value of this property affects only new data written to the file.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_WRITE_LEGACY_CONVERSION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "binary";
v[1] = "string";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_PARQUET_INFER_BINARY_AS", 429, "hive.parquet.infer.binary.as", "binary", v, "This setting controls what the parquet binary type gets inferred as by CREATE TABLE LIKE FILE. This is helpful since some systems specify the parquet schema for strings as binary.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_INFER_BINARY_AS> = v;
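// Hedged sketch: a minimal stand-in for the Validator$StringSet pattern used above
// (allowed values "binary"/"string"). An assumption-level re-implementation for
// illustration, not Hive's Validator class.
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public final class StringSetValidatorSketch {
    private final Set<String> allowed;

    StringSetValidatorSketch(String... values) {
        this.allowed = new LinkedHashSet<>(Arrays.asList(values));
    }

    // Mirrors the contract implied above: null means valid, otherwise an error message.
    String validate(String value) {
        return allowed.contains(value) ? null
             : "Invalid value '" + value + "', expected one of " + allowed;
    }

    public static void main(String[] args) {
        StringSetValidatorSketch v = new StringSetValidatorSketch("binary", "string");
        System.out.println(v.validate("binary")); // null -> valid
        System.out.println(v.validate("bytes"));  // rejected
    }
}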
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION", 430, "hive.avro.timestamp.skip.conversion", v, "Some older Hive implementations (pre-3.1) wrote Avro timestamps in a UTC-normalized manner, while from version 3.1 until now Hive wrote time zone agnostic timestamps. Setting this flag to true will treat legacy timestamps as time zone agnostic. Setting it to false will treat legacy timestamps as UTC-normalized. This flag will not affect timestamps written after this change.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AVRO_PROLEPTIC_GREGORIAN", 431, "hive.avro.proleptic.gregorian", v, "Should we write date and timestamp using the proleptic Gregorian calendar instead of the hybrid Julian Gregorian?\nHybrid is the default.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_PROLEPTIC_GREGORIAN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AVRO_PROLEPTIC_GREGORIAN_DEFAULT", 432, "hive.avro.proleptic.gregorian.default", v, "This value controls whether date and timestamp type in Avro files was written using the hybrid or proleptic\ncalendar. Hybrid is the default.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_PROLEPTIC_GREGORIAN_DEFAULT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AVRO_TIMESTAMP_LEGACY_CONVERSION_ENABLED", 433, "hive.avro.timestamp.legacy.conversion.enabled", v, "Whether to use former Java date/time APIs to convert between timezones when reading timestamps from Avro files. The property has no effect when the file contains explicit metadata about the conversion used to write the data; in this case reading conversion is based on the metadata.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_LEGACY_CONVERSION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AVRO_TIMESTAMP_WRITE_LEGACY_CONVERSION_ENABLED", 434, "hive.avro.timestamp.write.legacy.conversion.enabled", v, "Whether to use former Java date/time APIs to convert between timezones when writing timestamps in Avro files. Once data are written to the file, the effect is permanent (also reflected in the metadata). Changing the value of this property affects only new data written to the file.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AVRO_TIMESTAMP_WRITE_LEGACY_CONVERSION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS", 435, "hive.int.timestamp.conversion.in.seconds", v, "Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.\nSet this flag to true to interpret the value as seconds to be consistent with float/double.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS> = v;
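// Hedged sketch of the ambiguity hive.int.timestamp.conversion.in.seconds resolves:
// the same integral value yields very different instants depending on whether it is
// read as milliseconds (the default) or as seconds. Illustrative only.
import java.time.Instant;

public final class IntTimestampUnitSketch {
    public static void main(String[] args) {
        long value = 1_500_000_000L;
        Instant asMillis = Instant.ofEpochMilli(value);   // flag = false (default)
        Instant asSeconds = Instant.ofEpochSecond(value); // flag = true
        System.out.println("as milliseconds: " + asMillis);  // early 1970
        System.out.println("as seconds:      " + asSeconds); // mid 2017
    }
}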
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PARQUET_WRITE_INT64_TIMESTAMP", 436, "hive.parquet.write.int.timestamp", v, "Write parquet timestamps as int64/LogicalTypes instead of int96/OriginalTypes. Note: Timestamps will be time zone agnostic (NEVER converted to a different time zone).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_WRITE_INT64_TIMESTAMP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "nanos";
v[1] = "micros";
v[2] = "millis";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_PARQUET_TIMESTAMP_TIME_UNIT", 437, "hive.parquet.timestamp.time.unit", "micros", v, "Store parquet int64/LogicalTypes timestamps in this time unit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PARQUET_TIMESTAMP_TIME_UNIT> = v;
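// Hedged sketch: the precision trade-off between the units allowed above
// (nanos/micros/millis) for int64 timestamps. Illustrative arithmetic only.
import java.util.concurrent.TimeUnit;

public final class Int64TimestampUnitSketch {
    public static void main(String[] args) {
        long nanos = 1_678_901_234_567_891_234L; // a nanosecond-precision instant
        long micros = TimeUnit.NANOSECONDS.toMicros(nanos); // drops 3 trailing digits
        long millis = TimeUnit.NANOSECONDS.toMillis(nanos); // drops 6 trailing digits
        System.out.println("nanos : " + nanos);
        System.out.println("micros: " + micros);
        System.out.println("millis: " + millis);
    }
}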
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(8);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_BASE_DELTA_RATIO", 438, "hive.exec.orc.base.delta.ratio", v, "The ratio of base writer and\ndelta writer in terms of STRIPE_SIZE and BUFFER_SIZE.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_BASE_DELTA_RATIO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED", 439, "hive.exec.orc.delta.streaming.optimizations.enabled", v, "Whether to enable streaming optimizations for ORC delta files. This will disable ORC\'s internal indexes,\ndisable compression, enable fast encoding and disable dictionary encoding.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "HYBRID";
v[1] = "BI";
v[2] = "ETL";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_ORC_SPLIT_STRATEGY", 440, "hive.exec.orc.split.strategy", "HYBRID", v, "This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation as opposed to query execution (split generation does not read or cache file footers). ETL strategy is used when spending little more time in split generation is acceptable (split generation reads and caches file footers). HYBRID chooses between the above strategies based on heuristics.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_SPLIT_STRATEGY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(134217728L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_BLOB_STORAGE_SPLIT_SIZE", 441, "hive.exec.orc.blob.storage.split.size", v, "When blob storage is used, the BI split strategy does not have block locations for splitting orc files.\nIn such cases, split generation will use this config to split the orc file");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_BLOB_STORAGE_SPLIT_SIZE> = v;
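// Hedged sketch: without block locations on blob storage, the BI strategy falls back
// to fixed-size splitting; this shows the resulting split count for the 134217728-byte
// (128MB) default above. Illustrative only, not Hive's split generator.
public final class BlobSplitCountSketch {
    static long splitCount(long fileLength, long splitSize) {
        return (fileLength + splitSize - 1) / splitSize; // ceiling division
    }

    public static void main(String[] args) {
        long splitSize = 134_217_728L; // default from the entry above
        System.out.println(splitCount(1_000_000_000L, splitSize)); // 8 splits
    }
}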
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_WRITER_LLAP_MEMORY_MANAGER_ENABLED", 442, "hive.exec.orc.writer.llap.memory.manager.enabled", v, "Whether orc writers should use llap-aware memory manager. LLAP aware memory manager will use memory\nper executor instead of entire heap memory when concurrent orc writers are involved. This lets\ntask fragments use memory within their limit (memory per executor) when performing ETL in LLAP.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_WRITER_LLAP_MEMORY_MANAGER_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STREAMING_AUTO_FLUSH_ENABLED", 443, "hive.streaming.auto.flush.enabled", v, "Whether to enable memory \nmonitoring and automatic flushing of open record updaters during streaming ingest. This is an expert level \nsetting and disabling this may have severe performance impact under memory pressure.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STREAMING_AUTO_FLUSH_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.7F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_HEAP_MEMORY_MONITOR_USAGE_THRESHOLD", 444, "hive.heap.memory.monitor.usage.threshold", v, "Hive streaming does automatic memory management across all open record writers. This threshold will let the \nmemory monitor take an action (flush open files) when heap memory usage exceeds this threshold.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HEAP_MEMORY_MONITOR_USAGE_THRESHOLD> = v;
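// Hedged sketch: one way a heap-usage fraction like the 0.7 default above can be
// checked from plain Java via the standard JMX API. This is not Hive's monitor.
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

public final class HeapThresholdSketch {
    public static void main(String[] args) {
        double threshold = 0.7; // default from the entry above
        MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        // Assumes -Xmx is set so getMax() is defined (it returns -1 otherwise).
        double usedFraction = (double) heap.getUsed() / heap.getMax();
        System.out.printf("heap used %.2f of max; flush needed: %b%n",
                usedFraction, usedFraction > threshold);
    }
}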
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_STREAMING_AUTO_FLUSH_CHECK_INTERVAL_SIZE", 445, "hive.streaming.auto.flush.check.interval.size", "100Mb", v, "Hive streaming ingest has an auto-flush mechanism to flush all open record updaters under memory pressure.\nWhen memory usage exceeds hive.heap.memory.monitor.usage.threshold, the auto-flush mechanism will \nwait until this size (default 100Mb) of records is ingested before triggering a flush.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STREAMING_AUTO_FLUSH_CHECK_INTERVAL_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CLASSLOADER_SHADE_PREFIX", 446, "hive.classloader.shade.prefix", "", "During reflective instantiation of a class\n(input, output formats, serde etc.), when the classloader throws ClassNotFoundException, this\nshade prefix will be prepended to the class reference as a fallback and the load retried.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLASSLOADER_SHADE_PREFIX> = v;
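// Hedged sketch of the fallback described above: if loading a class fails, retry with
// the shade prefix prepended. The class names in main() are hypothetical examples.
public final class ShadePrefixLoadSketch {
    static Class<?> loadWithFallback(String className, String shadePrefix)
            throws ClassNotFoundException {
        try {
            return Class.forName(className);
        } catch (ClassNotFoundException e) {
            if (shadePrefix == null || shadePrefix.isEmpty()) {
                throw e;
            }
            // Retry with the configured prefix in front of the class reference.
            return Class.forName(shadePrefix + "." + className);
        }
    }

    public static void main(String[] args) throws Exception {
        // "util.ArrayList" only resolves once the illustrative prefix "java" is applied.
        System.out.println(loadWithFallback("util.ArrayList", "java"));
    }
}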
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_MS_FOOTER_CACHE_ENABLED", 447, "hive.orc.splits.ms.footer.cache.enabled", v, "Whether to enable using file metadata cache in metastore for ORC file footers.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_MS_FOOTER_CACHE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_MS_FOOTER_CACHE_PPD", 448, "hive.orc.splits.ms.footer.cache.ppd.enabled", v, "Whether to enable file footer cache PPD (hive.orc.splits.ms.footer.cache.enabled\nmust also be set to true for this to work).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_MS_FOOTER_CACHE_PPD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS", 449, "hive.orc.splits.include.file.footer", v, "If turned on, splits generated by orc will include metadata about the stripes in the file. This\ndata is read remotely (from the client or HS2 machine) and sent to all the tasks.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS", 450, "hive.orc.splits.directory.batch.ms", v, "How long, in ms, to wait to batch input directories for processing during ORC split\ngeneration. 0 means process directories individually. This can increase the number of\nmetastore calls if metastore metadata cache is used.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS", 451, "hive.orc.splits.include.fileid", v, "Include file ID in splits on file systems that support it.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS", 452, "hive.orc.splits.allow.synthetic.fileid", v, "Allow synthetic file ID in splits on file systems that don\'t have a native one.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE", 453, "hive.orc.cache.stripe.details.mem.size", "256Mb", v, "Maximum size of orc splits cached in the client.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS", 454, "hive.orc.compute.splits.num.threads", v, "How many threads orc should use to create splits in parallel.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ORC_CACHE_USE_SOFT_REFERENCES", 455, "hive.orc.cache.use.soft.references", v, "By default, the cache that the ORC input format uses to store orc file footers uses hard\nreferences for the cached objects. Setting this to true can help avoid out of memory\nissues under memory pressure (in some cases) at the cost of slight unpredictability in\noverall query performance.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_CACHE_USE_SOFT_REFERENCES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_IO_SARG_CACHE_MAX_WEIGHT_MB", 456, "hive.io.sarg.cache.max.weight.mb", v, "The max weight allowed for the SearchArgument Cache. By default, the cache allows a max-weight of 10MB, after which entries will be evicted.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IO_SARG_CACHE_MAX_WEIGHT_MB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL", 457, "hive.lazysimple.extended_boolean_literal", v, "LazySimpleSerde uses this property to determine if it treats \'T\', \'t\', \'F\', \'f\',\n\'1\', and \'0\' as extended, legal boolean literals, in addition to \'TRUE\' and \'FALSE\'.\nThe default is false, which means only \'TRUE\' and \'FALSE\' are treated as legal\nboolean literals.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESKEWJOIN", 458, "hive.optimize.skewjoin", v, "Whether to enable skew join optimization. \nThe algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\nprocessing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\njob, process those skewed keys. The same key need not be skewed for all the tables, and so,\nthe follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\nmap-join.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOIN> = v;
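// Hedged sketch of the detection rule described above: a key is treated as skewed once
// more than hive.skewjoin.key rows (100000 by default, see below) share it. The
// counting here is illustrative, not Hive's join operator code.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class SkewKeyDetectionSketch {
    static Map<String, Long> skewedKeys(List<String> joinKeys, long threshold) {
        Map<String, Long> counts = new HashMap<>();
        for (String k : joinKeys) {
            counts.merge(k, 1L, Long::sum);
        }
        counts.values().removeIf(c -> c <= threshold); // keep only skewed keys
        return counts;
    }

    public static void main(String[] args) {
        List<String> keys = List.of("a", "a", "a", "b");
        // Tiny threshold so the example triggers; Hive's default is 100000.
        System.out.println(skewedKeys(keys, 2)); // {a=3}
    }
}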
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEDYNAMICPARTITIONHASHJOIN", 459, "hive.optimize.dynamic.partition.hashjoin", v, "Whether to enable dynamically partitioned hash join optimization. \nThis setting is also dependent on enabling hive.auto.convert.join");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEDYNAMICPARTITIONHASHJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECONVERTJOIN", 460, "hive.auto.convert.join", v, "Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECONVERTJOINNOCONDITIONALTASK", 461, "hive.auto.convert.join.noconditionaltask", v, "Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size. \nIf this parameter is on, and the sum of size for n-1 of the tables/partitions for an n-way join is smaller than the\nspecified size, the join is directly converted to a mapjoin (there is no conditional task).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINNOCONDITIONALTASK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CONVERT_ANTI_JOIN", 462, "hive.auto.convert.anti.join", v, "Whether Hive enables the optimization of converting a join with a null filter to an anti join");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONVERT_ANTI_JOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(10000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD", 463, "hive.auto.convert.join.noconditionaltask.size", v, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. \nHowever, if it is on, and the sum of size for n-1 of the tables/partitions for an n-way join is smaller than this size, \nthe join is directly converted to a mapjoin (there is no conditional task). The default is 10MB");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD> = v;
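// Hedged sketch of the rule described above: convert to a mapjoin when the sum of the
// n-1 smallest join inputs fits under hive.auto.convert.join.noconditionaltask.size.
// Illustrative only, not Hive's optimizer code.
import java.util.Arrays;

public final class MapJoinConversionSketch {
    static boolean convertToMapJoin(long[] inputSizes, long threshold) {
        long[] sorted = inputSizes.clone();
        Arrays.sort(sorted);
        long smallSideSum = 0;
        for (int i = 0; i < sorted.length - 1; i++) { // all but the largest input
            smallSideSum += sorted[i];
        }
        return smallSideSum < threshold;
    }

    public static void main(String[] args) {
        long threshold = 10_000_000L; // 10MB default from the entry above
        long[] inputs = {2_000_000L, 3_000_000L, 900_000_000L};
        // true: 5MB of small tables can be broadcast alongside the big input
        System.out.println(convertToMapJoin(inputs, threshold));
    }
}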
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECONVERTJOINUSENONSTAGED", 464, "hive.auto.convert.join.use.nonstaged", v, "For conditional joins, if the input stream from a small alias can be directly applied to the join operator without \nfiltering or projection, the alias need not be pre-staged in the distributed cache via a mapred local task.\nCurrently, this does not work with vectorization or the tez execution engine.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINUSENONSTAGED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESKEWJOINKEY", 465, "hive.skewjoin.key", v, "Determine if we get a skew key in a join. If we see more than the specified number of rows with the same key in the join operator,\nwe treat the key as a skew join key.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINKEY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESKEWJOINMAPJOINNUMMAPTASK", 466, "hive.skewjoin.mapjoin.map.tasks", v, "Determine the number of map tasks used in the follow-up map join job for a skew join.\nIt should be used together with hive.skewjoin.mapjoin.min.split to perform fine-grained control.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINMAPJOINNUMMAPTASK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(33554432L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESKEWJOINMAPJOINMINSPLIT", 467, "hive.skewjoin.mapjoin.min.split", v, "Determine the maximum number of map tasks used in the follow-up map join job for a skew join by specifying \nthe minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESKEWJOINMAPJOINMINSPLIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESENDHEARTBEAT", 468, "hive.heartbeat.interval", v, "Send a heartbeat after this interval - used by mapjoin and filter operators");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESENDHEARTBEAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(100000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVELIMITMAXROWSIZE", 469, "hive.limit.row.max.size", v, "When trying a smaller subset of data for simple LIMIT, the minimum size we assume each row to have.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITMAXROWSIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVELIMITOPTLIMITFILE", 470, "hive.limit.optimize.limit.file", v, "When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTLIMITFILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVELIMITOPTENABLE", 471, "hive.limit.optimize.enable", v, "Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTENABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(50000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVELIMITOPTMAXFETCH", 472, "hive.limit.optimize.fetch.max", v, "Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \nInsert queries are not restricted by this limit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITOPTMAXFETCH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.1F);
v = new org.apache.hadoop.hive.conf.Validator$RatioValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RatioValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVELIMITPUSHDOWNMEMORYUSAGE", 473, "hive.limit.pushdown.memory.usage", v, v, "The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVELIMITPUSHDOWNMEMORYUSAGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(21000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECONVERTJOINMAXENTRIESHASHTABLE", 474, "hive.auto.convert.join.hashtable.max.entries", v, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. \nHowever, if it is on, and the predicted number of entries in the hashtable for a given join \ninput is larger than this number, the join will not be converted to a mapjoin. \nThe value \"-1\" means no limit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINMAXENTRIESHASHTABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("XPRODSMALLTABLEROWSTHRESHOLD", 475, "hive.xprod.mapjoin.small.table.rows", v, "Maximum number of rows on build side of map join before it switches over to cross product edge");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars XPRODSMALLTABLEROWSTHRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(10000000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECONVERTJOINMAXSHUFFLESIZE", 476, "hive.auto.convert.join.shuffle.max.size", v, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. \nHowever, if it is on, and the predicted size of the larger input for a given join is greater \nthan this number, the join will not be converted to a dynamically partitioned hash join. \nThe value \"-1\" means no limit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONVERTJOINMAXSHUFFLESIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.99F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHASHTABLEKEYCOUNTADJUSTMENT", 477, "hive.hashtable.key.count.adjustment", v, "Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate of the number of keys is divided by this value. If the value is 0, statistics are not used and hive.hashtable.initialCapacity is used instead.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEKEYCOUNTADJUSTMENT> = v;
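// Hedged sketch of the sizing rule described above: divide the statistics-based key
// estimate by the adjustment, or fall back to hive.hashtable.initialCapacity when the
// adjustment is 0. Illustrative only.
public final class MapJoinHashtableSizeSketch {
    static long estimatedKeys(long statsKeyCount, float adjustment, int initialCapacity) {
        if (adjustment == 0f || statsKeyCount <= 0) {
            return initialCapacity; // statistics unused
        }
        return (long) (statsKeyCount / adjustment);
    }

    public static void main(String[] args) {
        // Defaults from the entries above/below: adjustment 0.99, initialCapacity 100000.
        System.out.println(estimatedKeys(990_000L, 0.99f, 100_000)); // 1000000
        System.out.println(estimatedKeys(990_000L, 0f, 100_000));    // 100000
    }
}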
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHASHTABLETHRESHOLD", 478, "hive.hashtable.initialCapacity", v, "Initial capacity of mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLETHRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.75F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHASHTABLELOADFACTOR", 479, "hive.hashtable.loadfactor", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLELOADFACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.55F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE", 480, "hive.mapjoin.followby.gby.localtask.max.memory.usage", v, "This number means how much memory the local task can use to hold the key/value pairs in an in-memory hash table \nwhen this map join is followed by a group by. If the local task\'s memory usage is more than this number, \nthe local task will abort by itself. It means the data of the small table is too large to be held in memory.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.9F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHASHTABLEMAXMEMORYUSAGE", 481, "hive.mapjoin.localtask.max.memory.usage", v, "This number means how much memory the local task can use to hold the key/value pairs in an in-memory hash table. \nIf the local task\'s memory usage is more than this number, the local task will abort by itself. \nIt means the data of the small table is too large to be held in memory.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLEMAXMEMORYUSAGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(100000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEHASHTABLESCALE", 482, "hive.mapjoin.check.memory.rows", v, "The number of rows to process between checks of the memory usage");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEHASHTABLESCALE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEINPUTFORMAT", 483, "hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat", "The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEINPUTFORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVETEZINPUTFORMAT", 484, "hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat", "The default input format for tez. Tez groups splits in the AM.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZINPUTFORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(-1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVETEZCONTAINERSIZE", 485, "hive.tez.container.size", v, "By default Tez will spawn containers of the size of a mapper. This can be used to override that.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZCONTAINERSIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(-1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVETEZCPUVCORES", 486, "hive.tez.cpu.vcores", v, "By default Tez will ask for however many cpus map-reduce is configured to use per container.\nThis can be used to override that.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZCPUVCORES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVETEZJAVAOPTS", 487, "hive.tez.java.opts", null, "By default Tez will use the Java options from map tasks. This can be used to override that.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZJAVAOPTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVETEZLOGLEVEL", 488, "hive.tez.log.level", "INFO", "The log level to use for tasks executing as part of the DAG.\nUsed only if hive.tez.java.opts is used to configure Java options.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZLOGLEVEL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVETEZHS2USERACCESS", 489, "hive.tez.hs.user.access", v, "Whether to grant access to the hs2/hive user for queries");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZHS2USERACCESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEQUERYNAME", 490, "hive.query.name", null, "This name is used by Tez to set the dag name. This name in turn will appear on \nthe Tez UI representing the work that was done.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEQUERYNAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVETEZJOBNAME", 491, "tez.job.name", "HIVE-%s", "This name is used by Tez to set the job name. This name in turn will appear on \nthe Yarn UI representing the Yarn application name. The job name may be a \nJava String.format() string, to which the session ID will be supplied as the single parameter.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVETEZJOBNAME> = v;
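// Hedged sketch: the entry above says tez.job.name may be a String.format() pattern
// receiving the session ID as its single argument. The session ID below is made up.
public final class TezJobNameSketch {
    public static void main(String[] args) {
        String pattern = "HIVE-%s"; // default from the entry above
        String sessionId = "f2b1c3d4-example"; // hypothetical session ID
        System.out.println(String.format(pattern, sessionId)); // HIVE-f2b1c3d4-example
    }
}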
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SYSLOG_INPUT_FORMAT_FILE_PRUNING", 492, "hive.syslog.input.format.file.pruning", v, "Whether syslog input format should prune files based on timestamp (ts) column in sys.logs table.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SYSLOG_INPUT_FORMAT_FILE_PRUNING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(9223372036854775807L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit,java.lang.Long,boolean,java.lang.Long,boolean)>(v, v, 0, v, 0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("SYSLOG_INPUT_FORMAT_FILE_TIME_SLICE", 493, "hive.syslog.input.format.file.time.slice", "300s", v, "Files stored in sys.logs typically are chunked by time interval. For example: depending on the\nlogging library used this represents the flush interval/time slice. \nIf the time slice/flush interval is set to 5 minutes, then the expectation is that the filename \n2019-01-02-10-00_0.log represents the time range from 10:00 to 10:05.\nThis time slice should align with the flush interval of the logging library, else file pruning may\nincorrectly prune files, leading to incorrect results from the sys.logs table.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SYSLOG_INPUT_FORMAT_FILE_TIME_SLICE> = v;
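// Hedged sketch of the pruning idea described above: derive a file's covered time range
// from its name and the configured slice, then check overlap with the query's predicate
// range. The filename layout follows the example in the description; this is not Hive's
// input format code.
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public final class SyslogFilePruningSketch {
    private static final DateTimeFormatter FMT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm");

    // True when the file may contain rows inside [queryStart, queryEnd).
    static boolean mayOverlap(String fileName, long sliceMinutes,
                              LocalDateTime queryStart, LocalDateTime queryEnd) {
        String stamp = fileName.substring(0, fileName.indexOf('_'));
        LocalDateTime fileStart = LocalDateTime.parse(stamp, FMT);
        LocalDateTime fileEnd = fileStart.plusMinutes(sliceMinutes);
        return fileStart.isBefore(queryEnd) && queryStart.isBefore(fileEnd);
    }

    public static void main(String[] args) {
        // 300s slice (default above) = 5 minutes; filename from the description.
        System.out.println(mayOverlap("2019-01-02-10-00_0.log", 5,
                LocalDateTime.of(2019, 1, 2, 10, 3),
                LocalDateTime.of(2019, 1, 2, 10, 10))); // true: ranges overlap
    }
}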
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTIMIZEBUCKETINGSORTING", 494, "hive.optimize.bucketingsorting", v, "Don\'t create a reducer for enforcing \nbucketing/sorting for queries of the form: \ninsert overwrite table T2 select * from T1;\nwhere T1 and T2 are bucketed/sorted by the same keys into the same number of buckets.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEBUCKETINGSORTING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEPARTITIONER", 495, "hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPARTITIONER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEENFORCESORTMERGEBUCKETMAPJOIN", 496, "hive.enforce.sortmergebucketmapjoin", v, "If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not?");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEENFORCESORTMERGEBUCKETMAPJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEENFORCEBUCKETMAPJOIN", 497, "hive.enforce.bucketmapjoin", v, "If the user asked for bucketed map-side join, and it cannot be performed, \nshould the query fail or not? For example, if the buckets in the tables being joined are\nnot a multiple of each other, bucketed map-side join cannot be performed, and the\nquery will fail if hive.enforce.bucketmapjoin is set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEENFORCEBUCKETMAPJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SORT_WHEN_BUCKETING", 498, "hive.optimize.clustered.sort", v, "When this option is true and a Hive table was created with a CLUSTERED BY clause, we will also\nsort by the same keys (if sort columns were not specified)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SORT_WHEN_BUCKETING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ENFORCE_NOT_NULL_CONSTRAINT", 499, "hive.constraint.notnull.enforce", v, "Should \"IS NOT NULL\" constraint be enforced?");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENFORCE_NOT_NULL_CONSTRAINT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTO_SORTMERGE_JOIN", 500, "hive.auto.convert.sortmerge.join", v, "Whether the join will be automatically converted to a sort-merge join if the joined tables pass the criteria for a sort-merge join.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTO_SORTMERGE_JOIN_REDUCE", 501, "hive.auto.convert.sortmerge.join.reduce.side", v, "Whether hive.auto.convert.sortmerge.join (if enabled) should be applied to the reduce side.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_REDUCE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR", 502, "hive.auto.convert.sortmerge.join.bigtable.selection.policy", "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ", "The policy to choose the big table for automatic conversion to sort-merge join. \nBy default, the table with the largest partitions is chosen as the big table. All policies are:\n. based on position of the table - the leftmost table is selected\norg.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSelectorForAutoSMJ.\n. based on total size (all the partitions selected in the query) of the table \norg.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.\n. based on average size (all the partitions selected in the query) of the table \norg.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.\nNew policies can be added in the future.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR> = v;
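Because the big-table policy above is stored as a plain class-name string (this entry has no validator), switching policies is a single setVar call. A sketch, assuming the selector classes named in the description are available:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class BigTablePolicyDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // Prefer total-size-based selection over the average-size default.
            conf.setVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR,
                "org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ");
            System.out.println(
                conf.getVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR));
        }
    }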
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN", 503, "hive.auto.convert.sortmerge.join.to.mapjoin", v, "If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, \nthis parameter decides whether each table should be tried as a big table, and effectively a map-join should be\ntried. That would create a conditional task with n+1 children for an n-way join (1 child for each table as the\nbig table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a\nsort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted\nand bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table\nwith few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\nif the complete small table can fit in memory, and a map-join can be performed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESCRIPTOPERATORTRUST", 504, "hive.exec.script.trust", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESCRIPTOPERATORTRUST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEROWOFFSET", 505, "hive.exec.rowoffset", v, "Whether to provide the row offset virtual column");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEROWOFFSET> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTINDEXFILTER", 506, "hive.optimize.index.filter", v, "Whether to enable automatic use of indexes");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTINDEXFILTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTPPD", 507, "hive.optimize.ppd", v, "Whether to enable predicate pushdown");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTPPD_WINDOWING", 508, "hive.optimize.ppd.windowing", v, "Whether to enable predicate pushdown through windowing");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD_WINDOWING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEPPDRECOGNIZETRANSITIVITY", 509, "hive.ppd.recognizetransivity", v, "Whether to transitively replicate predicate filters over equijoin conditions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPDRECOGNIZETRANSITIVITY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEPPD_RECOGNIZE_COLUMN_EQUALITIES", 510, "hive.ppd.recognize.column.equalities", v, "Whether we should traverse the join branches to discover transitive propagation opportunities over equijoin conditions. \nRequires hive.ppd.recognizetransivity to be set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPD_RECOGNIZE_COLUMN_EQUALITIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEPPDREMOVEDUPLICATEFILTERS", 511, "hive.ppd.remove.duplicatefilters", v, "During query optimization, filters may be pushed down in the operator tree. \nIf this config is true only pushed down filters remain in the operator tree, \nand the original filter is removed. If this config is false, the original filter \nis also left in the operator tree at the original place.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPPDREMOVEDUPLICATEFILTERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN", 512, "hive.optimize.join.disjunctive.transitive.predicates.pushdown", v, "Whether to transitively infer disjunctive predicates across joins. \nDisjunctive predicates are hard to simplify and pushing them down might lead to infinite rule matching, causing stack overflow and OOM errors.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEPOINTLOOKUPOPTIMIZER", 513, "hive.optimize.point.lookup", v, "Whether to transform OR clauses in Filter operators into IN clauses");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPOINTLOOKUPOPTIMIZER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEPOINTLOOKUPOPTIMIZERMIN", 514, "hive.optimize.point.lookup.min", v, "Minimum number of OR clauses needed to transform into IN clauses");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPOINTLOOKUPOPTIMIZERMIN> = v;
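To make the pair of settings above concrete: with the optimizer enabled and the minimum at its default of 2, a filter such as (c = 1 OR c = 2 OR c = 3) is rewritten to c IN (1, 2, 3). A sketch using the standard boolean/int accessors; the value 3 here is just an illustrative override:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class PointLookupDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            conf.setBoolVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER, true);
            // Require at least 3 OR branches before rewriting to IN.
            conf.setIntVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN, 3);
            System.out.println(
                conf.getIntVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN));
        }
    }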
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(16);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPT_TRANSFORM_IN_MAXNODES", 515, "hive.optimize.transform.in.maxnodes", v, "Maximum number of IN expressions beyond which IN will not be transformed into OR clause");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPT_TRANSFORM_IN_MAXNODES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECOUNTDISTINCTOPTIMIZER", 516, "hive.optimize.countdistinct", v, "Whether to transform count distinct into two stages");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTDISTINCTOPTIMIZER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEPARTITIONCOLUMNSEPARATOR", 517, "hive.optimize.partition.columns.separate", v, "Extract partition columns from IN clauses");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEPARTITIONCOLUMNSEPARATOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTCONSTANTPROPAGATION", 518, "hive.optimize.constant.propagation", v, "Whether to enable constant propagation optimizer");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTCONSTANTPROPAGATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEIDENTITYPROJECTREMOVER", 519, "hive.optimize.remove.identity.project", v, "Removes identity project from operator tree");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEIDENTITYPROJECTREMOVER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEMETADATAONLYQUERIES", 520, "hive.optimize.metadataonly", v, "Whether to eliminate scans of the tables from which no columns are selected. Note\nthat, when selecting from empty tables with data files, this can produce incorrect\nresults, so it\'s disabled by default. It works correctly for normal tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEMETADATAONLYQUERIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVENULLSCANOPTIMIZE", 521, "hive.optimize.null.scan", v, "Don\'t scan relations which are guaranteed to not generate any rows");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVENULLSCANOPTIMIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTPPD_STORAGE", 522, "hive.optimize.ppd.storage", v, "Whether to push predicates down to storage handlers");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTPPD_STORAGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTGROUPBY", 523, "hive.optimize.groupby", v, "Whether to enable the bucketed group by from bucketed partitions/tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTGROUPBY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTBUCKETMAPJOIN", 524, "hive.optimize.bucketmapjoin", v, "Whether to try bucket mapjoin");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTBUCKETMAPJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTSORTMERGEBUCKETMAPJOIN", 525, "hive.optimize.bucketmapjoin.sortedmerge", v, "Whether to try sorted bucket merge map join");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTSORTMERGEBUCKETMAPJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTREDUCEDEDUPLICATION", 526, "hive.optimize.reducededuplication", v, "Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \nThis should always be set to true. Since it is a new feature, it has been made configurable.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTREDUCEDEDUPLICATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTREDUCEDEDUPLICATIONMINREDUCER", 527, "hive.optimize.reducededuplication.min.reducer", v, "Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. \nThat means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job.\nThe optimization will be automatically disabled if the number of reducers would be less than the specified value.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTREDUCEDEDUPLICATIONMINREDUCER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTJOINREDUCEDEDUPLICATION", 528, "hive.optimize.joinreducededuplication", v, "Remove extra shuffle/sorting operations after join algorithm selection has been executed. \nCurrently it only works with Apache Tez. This should always be set to true. \nSince it is a new feature, it has been made configurable.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTJOINREDUCEDEDUPLICATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD", 529, "hive.optimize.sort.dynamic.partition.threshold", v, "When enabled, the dynamic partitioning column will be globally sorted.\nThis way we can keep only one record writer open for each partition value\nin the reducer, thereby reducing the memory pressure on reducers.\nThis config has the following possible values: \n\t-1 - This completely disables the optimization. \n\t1 - This always enables the optimization. \n\t0 - This makes the optimization a cost based decision. \nSetting it to any other positive integer will make Hive use this as a threshold for the number of writers.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD> = v;
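The sentinel values in the description above map directly onto the int accessor. A minimal sketch:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class SortDynamicPartitionDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // -1 disables the global sort, 1 forces it, 0 (the default) leaves it
            // as a cost-based decision; larger values act as a writer-count threshold.
            conf.setIntVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, 1);
            System.out.println(
                conf.getIntVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD));
        }
    }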
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESAMPLINGFORORDERBY", 530, "hive.optimize.sampling.orderby", v, "Uses sampling on order-by clause for parallel execution.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGFORORDERBY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESAMPLINGNUMBERFORORDERBY", 531, "hive.optimize.sampling.orderby.number", v, "Total number of samples to be obtained.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGNUMBERFORORDERBY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.1F);
v = new org.apache.hadoop.hive.conf.Validator$RatioValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RatioValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVESAMPLINGPERCENTFORORDERBY", 532, "hive.optimize.sampling.orderby.percent", v, v, "Probability with which a row will be chosen.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESAMPLINGPERCENTFORORDERBY> = v;
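The three sampling knobs above work together: the boolean switches parallel order-by on, the int caps the total sample count, and the float is the per-row selection probability (the RatioValidator built above is what guards it; by my reading it rejects values outside the 0..1 range, which is an assumption worth verifying). A sketch:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class OrderBySamplingDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            conf.setBoolVar(HiveConf.ConfVars.HIVESAMPLINGFORORDERBY, true);
            conf.setIntVar(HiveConf.ConfVars.HIVESAMPLINGNUMBERFORORDERBY, 2000);   // illustrative
            conf.setFloatVar(HiveConf.ConfVars.HIVESAMPLINGPERCENTFORORDERBY, 0.2f); // illustrative
            System.out.println(
                conf.getFloatVar(HiveConf.ConfVars.HIVESAMPLINGPERCENTFORORDERBY));
        }
    }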
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_REMOVE_ORDERBY_IN_SUBQUERY", 533, "hive.remove.orderby.in.subquery", v, "If set to true, order/sort by without limit in sub queries will be removed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REMOVE_ORDERBY_IN_SUBQUERY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTIMIZEDISTINCTREWRITE", 534, "hive.optimize.distinct.rewrite", v, "When applicable, this optimization rewrites distinct aggregates from a single stage to multi-stage aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or not should be a cost-based decision. Until Hive formalizes a cost model for this, it is config-driven.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEDISTINCTREWRITE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_UNION_REMOVE", 535, "hive.optimize.union.remove", v, "Whether to remove the union and push the operators between union and the filesink above union. \nThis avoids an extra scan of the output by union. This is independently useful for union\nqueries, and especially useful when hive.optimize.skewjoin.compiletime is set to true, since an\nextra union is inserted.\n\nThe merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.\nIf the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the assumption was that the\nnumber of reducers is small, so the number of files is anyway small. However, with this optimization,\nwe are possibly increasing the number of files by a big margin. So, we merge aggressively.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_UNION_REMOVE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTCORRELATION", 536, "hive.optimize.correlation", v, "Whether to exploit intra-query correlations.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTCORRELATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_LIMIT_TRANSPOSE", 537, "hive.optimize.limittranspose", v, "Whether to push a limit through left/right outer join or union. If the value is true and the size of the outer\ninput is reduced enough (as specified in hive.optimize.limittranspose.reduction), the limit is pushed\nto the outer input or union; to remain semantically correct, the limit is kept on top of the join or the union too.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE", 538, "hive.optimize.limittranspose.reductionpercentage", v, "When hive.optimize.limittranspose is true, this variable specifies the minimal reduction of the\nsize of the outer input of the join or input of the union that we should get in order to apply the rule.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES", 539, "hive.optimize.limittranspose.reductiontuples", v, "When hive.optimize.limittranspose is true, this variable specifies the minimal reduction in the\nnumber of tuples of the outer input of the join or the input of the union that you should get in order to apply the rule.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_CONSTRAINTS_JOIN", 540, "hive.optimize.constraints.join", v, "Whether to use referential constraints\nto optimize (remove or transform) join operators");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_CONSTRAINTS_JOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_SORT_PREDS_WITH_STATS", 541, "hive.optimize.filter.preds.sort", v, "Whether to sort conditions in filters\nbased on estimated selectivity and compute cost");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SORT_PREDS_WITH_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_REDUCE_WITH_STATS", 542, "hive.optimize.filter.stats.reduction", v, "Whether to simplify comparison\nexpressions in filter operators using column stats");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_REDUCE_WITH_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME", 543, "hive.optimize.skewjoin.compiletime", v, "Whether to create a separate plan for skewed keys for the tables in the join.\nThis is based on the skewed keys stored in the metadata. At compile time, the plan is broken\ninto different joins: one for the skewed keys, and the other for the remaining keys. And then,\na union is performed for the 2 joins generated above. So unless the same skewed key is present\nin both the joined tables, the join for the skewed key will be performed as a map-side join.\n\nThe main difference between this parameter and hive.optimize.skewjoin is that this parameter\nuses the skew information stored in the metastore to optimize the plan at compile time itself.\nIf there is no skew information in the metadata, this parameter will not have any effect.\nBoth hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.\nIdeally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing\nso for backward compatibility.\n\nIf the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\nwould change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME> = v;
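Per the description above, the compile-time rewrite only pays off when the runtime flag is also on. A sketch that sets both; the runtime key is addressed by its string name (hive.optimize.skewjoin) because its enum constant is defined elsewhere in this class:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class SkewJoinDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            conf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME, true);
            // Companion runtime flag, set through the generic Configuration API.
            conf.setBoolean("hive.optimize.skewjoin", true);
            System.out.println(conf.getBoolean("hive.optimize.skewjoin", false));
        }
    }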
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_LIMIT", 544, "hive.optimize.limit", v, "Optimize limit by pushing through Left Outer Joins and Selects");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_LIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_TOPNKEY", 545, "hive.optimize.topnkey", v, "Whether to enable the top n key optimizer.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TOPNKEY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(128);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MAX_TOPN_ALLOWED", 546, "hive.optimize.topnkey.max", v, "Maximum topN value allowed by the top n key optimizer.\nIf the LIMIT is greater than this value, then top n key optimization won\'t be used.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAX_TOPN_ALLOWED> = v;
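A concrete reading of the two entries above: with the defaults (optimizer on, max 128), a query shaped like SELECT ... ORDER BY k LIMIT 100 qualifies for the top n key filter, while LIMIT 1000 does not. A sketch of that eligibility check; the limit value is hypothetical:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class TopNKeyDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            boolean enabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_TOPNKEY);
            int maxTopN = conf.getIntVar(HiveConf.ConfVars.HIVE_MAX_TOPN_ALLOWED);
            int limit = 100; // hypothetical LIMIT taken from a query
            System.out.println("top n key applies: " + (enabled && limit <= maxTopN));
        }
    }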
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.8F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TOPN_EFFICIENCY_THRESHOLD", 547, "hive.optimize.topnkey.efficiency.threshold", v, "Disable topN key filter if the ratio between forwarded and total rows reaches this limit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_EFFICIENCY_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TOPN_EFFICIENCY_CHECK_BATCHES", 548, "hive.optimize.topnkey.efficiency.check.nbatches", v, "Check topN key filter efficiency after a specific number of batches.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_EFFICIENCY_CHECK_BATCHES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(64);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TOPN_MAX_NUMBER_OF_PARTITIONS", 549, "hive.optimize.topnkey.partitions.max", v, "Limit the maximum number of partitions used by the top N key operator.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TOPN_MAX_NUMBER_OF_PARTITIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_OPTIMIZATION", 550, "hive.optimize.shared.work", v, "Whether to enable the shared work optimizer. The optimizer finds scan operators over the same table\nand follow-up operators in the query plan and merges them if they meet some preconditions. Tez only.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_OPTIMIZATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION", 551, "hive.optimize.shared.work.extended", v, "Whether to enable shared work extended optimizer. The optimizer tries to merge equal operators\nafter a work boundary after shared work optimizer has been executed. Requires hive.optimize.shared.work\nto be set to true. Tez only.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_SEMIJOIN_OPTIMIZATION", 552, "hive.optimize.shared.work.semijoin", v, "Whether to enable shared work extended optimizer for semijoins. The optimizer tries to merge\nscan operators if one of them reads the full table, even if the other one is the target for\none or more semijoin edges. Tez only.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_SEMIJOIN_OPTIMIZATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_MERGE_TS_SCHEMA", 553, "hive.optimize.shared.work.merge.ts.schema", v, "Whether to enable merging scan operators over the same table but with different schema. The optimizer tries to merge the scan operators by taking the union of needed columns from all scan operators. Requires hive.optimize.shared.work to be set to true. Tez only.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_MERGE_TS_SCHEMA> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_REUSE_MAPJOIN_CACHE", 554, "hive.optimize.shared.work.mapjoin.cache.reuse", v, "When shared work optimizer is enabled, whether we should reuse the cache for the broadcast side\nof mapjoin operators that share same broadcast input. Requires hive.optimize.shared.work\nto be set to true. Tez only.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_REUSE_MAPJOIN_CACHE> = v;
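Several of the shared-work sub-flags above are documented as requiring the base flag. A sketch of the kind of guard a caller might apply, using only the boolean accessor:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class SharedWorkFlagsDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            boolean base = conf.getBoolVar(HiveConf.ConfVars.HIVE_SHARED_WORK_OPTIMIZATION);
            // The extended pass is only meaningful when the base pass runs.
            boolean extended = base
                && conf.getBoolVar(HiveConf.ConfVars.HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION);
            System.out.println("extended shared-work active: " + extended);
        }
    }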
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_DPPUNION_OPTIMIZATION", 555, "hive.optimize.shared.work.dppunion", v, "Enables DPP-ops unioning. This optimization makes it possible to merge multiple table scans with different dynamic filters into a single one (with a more complex filter).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DPPUNION_OPTIMIZATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_DPPUNION_MERGE_EVENTOPS", 556, "hive.optimize.shared.work.dppunion.merge.eventops", v, "Enables DPPUnion to merge EventOperators (right now this is used during DynamicPartitionPruning)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DPPUNION_MERGE_EVENTOPS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_DOWNSTREAM_MERGE", 557, "hive.optimize.shared.work.downstream.merge", v, "Analyzes and merges equivalent downstream operators after a successful shared work optimization step.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_DOWNSTREAM_MERGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SHARED_WORK_PARALLEL_EDGE_SUPPORT", 558, "hive.optimize.shared.work.parallel.edge.support", v, "Lets the shared work optimizer create parallel edges in case they are for semijoins or mapjoins.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SHARED_WORK_PARALLEL_EDGE_SUPPORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_REMOVE_SQ_COUNT_CHECK", 559, "hive.optimize.remove.sq_count_check", v, "Whether to remove an extra join with sq_count_check for scalar subqueries with constant group by keys.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REMOVE_SQ_COUNT_CHECK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE", 560, "hive.optimize.update.table.properties.from.serde", v, "Whether to update table-properties by initializing tables\' SerDe instances during logical-optimization. \nBy doing so, certain SerDe classes (like AvroSerDe) can pre-calculate table-specific information, and \nstore it in table-properties, to be used later in the SerDe, while running the job.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE_LIST", 561, "hive.optimize.update.table.properties.from.serde.list", "org.apache.hadoop.hive.serde2.avro.AvroSerDe", "The comma-separated list of SerDe classes that are considered when enhancing table-properties \nduring logical optimization.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE_LIST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_SCAN_PROBEDECODE", 562, "hive.optimize.scan.probedecode", v, "Whether to find suitable table scan operators that could reduce the number of decoded rows at runtime by probing extra available information. \nThe probe side for the row-level filtering is generated either statically in the case of expressions or dynamically for joins, e.g., use the cached MapJoin hashtable created on the small table side to filter out row columns that are not going to be used when reading the large table data. This will result in fewer CPU cycles spent decoding unused data.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_SCAN_PROBEDECODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_HMS_QUERY_CACHE_ENABLED", 563, "hive.optimize.metadata.query.cache.enabled", v, "This property enables caching metadata for repetitive requests on a per-query basis");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_HMS_QUERY_CACHE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_VIEW_CACHE_ENABLED", 564, "hive.optimize.view.tables.cache.enabled", v, "This property enables caching of views and their underlying tables. The cache in memory may be stale, but provides an optimization if it is accurate.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_VIEW_CACHE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(3);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CTE_MATERIALIZE_THRESHOLD", 565, "hive.optimize.cte.materialize.threshold", v, "If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\nbefore executing the main query block. -1 will disable this feature.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTE_MATERIALIZE_THRESHOLD> = v;
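As a concrete reading of the threshold above (default 3, -1 disables): a CTE referenced more times than the threshold is materialized before the main query block runs. A sketch of that decision; the reference count is hypothetical:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class CteMaterializeDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            int threshold = conf.getIntVar(HiveConf.ConfVars.HIVE_CTE_MATERIALIZE_THRESHOLD);
            int references = 4; // hypothetical number of references to one CTE
            // -1 disables the feature entirely; otherwise compare against the threshold.
            boolean materialize = threshold >= 0 && references > threshold;
            System.out.println("materialize CTE: " + materialize);
        }
    }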
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CTE_MATERIALIZE_FULL_AGGREGATE_ONLY", 566, "hive.optimize.cte.materialize.full.aggregate.only", v, "If enabled, only CTEs with aggregate output will be pre-materialized; otherwise, all CTEs will be. Also, the number of references to a CTE clause must exceed the value of hive.optimize.cte.materialize.threshold.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTE_MATERIALIZE_FULL_AGGREGATE_ONLY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_BI_ENABLED", 567, "hive.optimize.bi.enabled", v, "Enables query rewrites based on approximate functions (sketches).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_COUNTDISTINCT_ENABLED", 568, "hive.optimize.bi.rewrite.countdistinct.enabled", v, "Enables rewriting COUNT(DISTINCT(X)) queries to use sketch functions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_COUNTDISTINCT_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[1];
v[0] = "hll";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_COUNT_DISTINCT_SKETCH", 569, "hive.optimize.bi.rewrite.countdistinct.sketch", "hll", v, "Defines which sketch type to use when rewriting COUNT(DISTINCT(X)) expressions. Distinct counting can be done with: hll");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_COUNT_DISTINCT_SKETCH> = v;
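The StringSet validator assembled just above is what restricts this key to "hll". Assuming verifyAndSet is the validation entry point (my understanding is that the SET command funnels through it; treat that as an assumption), a sketch of accepted versus rejected values:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class SketchValidatorDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // "hll" is in the StringSet, so this passes validation.
            conf.verifyAndSet("hive.optimize.bi.rewrite.countdistinct.sketch", "hll");
            try {
                // "theta" is not in the StringSet {"hll"}; validation should reject it.
                conf.verifyAndSet("hive.optimize.bi.rewrite.countdistinct.sketch", "theta");
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }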
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_PERCENTILE_DISC_ENABLED", 570, "hive.optimize.bi.rewrite.percentile_disc.enabled", v, "Enables rewriting PERCENTILE_DISC(X) queries to use sketch functions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_PERCENTILE_DISC_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[1];
v[0] = "kll";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_PERCENTILE_DISC_SKETCH", 571, "hive.optimize.bi.rewrite.percentile_disc.sketch", "kll", v, "Defines which sketch type to use when rewriting PERCENTILE_DISC expressions. Options: kll");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_PERCENTILE_DISC_SKETCH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_CUME_DIST_ENABLED", 572, "hive.optimize.bi.rewrite.cume_dist.enabled", v, "Enables rewriting CUME_DIST(X) queries to use sketch functions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_CUME_DIST_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[1];
v[0] = "kll";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_CUME_DIST_SKETCH", 573, "hive.optimize.bi.rewrite.cume_dist.sketch", "kll", v, "Defines which sketch type to use when rewriting CUME_DIST expressions. Options: kll");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_CUME_DIST_SKETCH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_NTILE_ENABLED", 574, "hive.optimize.bi.rewrite.ntile.enabled", v, "Enables rewriting NTILE(X) queries to use sketch functions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_NTILE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[1];
v[0] = "kll";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_NTILE_SKETCH", 575, "hive.optimize.bi.rewrite.ntile.sketch", "kll", v, "Defines which sketch type to use when rewriting NTILE expressions. Options: kll");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_NTILE_SKETCH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_RANK_ENABLED", 576, "hive.optimize.bi.rewrite.rank.enabled", v, "Enables rewriting RANK() queries to use sketch functions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_RANK_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[1];
v[0] = "kll";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_OPTIMIZE_BI_REWRITE_RANK_SKETCH", 577, "hive.optimize.bi.rewrite.rank.sketch", "kll", v, "Defines which sketch type to use when rewriting RANK expressions. Options: kll");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_OPTIMIZE_BI_REWRITE_RANK_SKETCH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_ESTIMATE_STATS", 578, "hive.stats.estimate", v, "Estimate statistics when actual statistics are not available.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_ESTIMATE_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(20.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_NDV_ESTIMATE_PERC", 579, "hive.stats.ndv.estimate.percent", v, "This percentage of rows will be estimated as the count of distinct values in the absence of statistics.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ESTIMATE_PERC> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_JOIN_NDV_READJUSTMENT", 580, "hive.stats.join.ndv.readjustment", v, "Set this to true to use approximation-based logic to adjust the NDV after a join.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_JOIN_NDV_READJUSTMENT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(5.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_NUM_NULLS_ESTIMATE_PERC", 581, "hive.stats.num.nulls.estimate.percent", v, "This percentage of rows will be estimated as the number of nulls in the absence of statistics.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NUM_NULLS_ESTIMATE_PERC> = v;
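The two estimate percentages above are plain row-count fractions: for a table of 1,000,000 rows with no column statistics, the defaults guess 200,000 distinct values and 50,000 nulls. A sketch of that arithmetic; the row count is hypothetical:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class StatsEstimateDemo {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            long rowCount = 1_000_000L; // hypothetical table size
            float ndvPct = conf.getFloatVar(HiveConf.ConfVars.HIVE_STATS_NDV_ESTIMATE_PERC);        // 20.0 by default
            float nullPct = conf.getFloatVar(HiveConf.ConfVars.HIVE_STATS_NUM_NULLS_ESTIMATE_PERC); // 5.0 by default
            System.out.println("estimated NDV:   " + (long) (rowCount * ndvPct / 100));
            System.out.println("estimated nulls: " + (long) (rowCount * nullPct / 100));
        }
    }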
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESTATSAUTOGATHER", 582, "hive.stats.autogather", v, "A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSAUTOGATHER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVESTATSCOLAUTOGATHER", 583, "hive.stats.column.autogather", v, "A flag to gather column statistics automatically.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSCOLAUTOGATHER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$PatternSet;
v = newarray (java.lang.String)[2];
v[0] = "custom";
v[1] = "fs";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$PatternSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVESTATSDBCLASS", 584, "hive.stats.dbclass", "fs", v, "The storage that stores temporary Hive statistics. In filesystem based statistics collection (\'fs\'), \neach task writes statistics it has collected in a file on the filesystem, which will be aggregated \nafter the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTATSDBCLASS> = v;
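// --- Example (editor's sketch) --------------------------------------------------
// Hedged usage of the public HiveConf accessors to read/override HIVESTATSDBCLASS;
// getVar/setVar are real HiveConf methods, the rest is illustrative.
import org.apache.hadoop.hive.conf.HiveConf;

public class StatsDbClassExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // "fs" is the default; the validator above only admits "fs" and "custom".
        conf.setVar(HiveConf.ConfVars.HIVESTATSDBCLASS, "fs");
        System.out.println(conf.getVar(HiveConf.ConfVars.HIVESTATSDBCLASS));
    }
}
// -------------------------------------------------------------------------------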
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_DEFAULT_PUBLISHER", 585, "hive.stats.default.publisher", "", "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DEFAULT_PUBLISHER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_DEFAULT_AGGREGATOR", 586, "hive.stats.default.aggregator", "", "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DEFAULT_AGGREGATOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("CLIENT_STATS_COUNTERS", 587, "hive.client.stats.counters", "", "Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). \nNon-display names should be used");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CLIENT_STATS_COUNTERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_RELIABLE", 588, "hive.stats.reliable", v, "Whether queries will fail because stats cannot be collected completely accurately. \nIf this is set to true, reading/writing from/into a partition may fail because the stats\ncould not be computed accurately.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_RELIABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_COLLECT_PART_LEVEL_STATS", 589, "hive.analyze.stmt.collect.partlevel.stats", v, "Queries like \'analyze table T compute statistics for columns\' should compute partition-level stats for a partitioned table even when no partition spec is specified.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_PART_LEVEL_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_GATHER_NUM_THREADS", 590, "hive.stats.gather.num.threads", v, "Number of threads used by noscan analyze command for partitioned tables.\nThis is applicable only for file formats that implement StatsProvidingRecordReader (like ORC).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_GATHER_NUM_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_COLLECT_TABLEKEYS", 591, "hive.stats.collect.tablekeys", v, "Whether join and group by keys on tables are derived and maintained in the QueryPlan.\nThis is useful to identify how tables are accessed and to determine if they should be bucketed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_TABLEKEYS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_COLLECT_SCANCOLS", 592, "hive.stats.collect.scancols", v, "Whether column accesses are tracked in the QueryPlan.\nThis is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_COLLECT_SCANCOLS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$PatternSet;
v = newarray (java.lang.String)[2];
v[0] = "hll";
v[1] = "fm";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$PatternSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_STATS_NDV_ALGO", 593, "hive.stats.ndv.algo", "hll", v, "hll and fm stand for HyperLogLog and FM-sketch, respectively, for computing ndv.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ALGO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_FETCH_BITVECTOR", 594, "hive.stats.fetch.bitvector", v, "Whether we fetch the bitvector when we compute ndv. Users can turn it off if they want to use the old schema.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_FETCH_BITVECTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(20.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_NDV_ERROR", 595, "hive.stats.ndv.error", v, "The standard error allowed for NDV estimates, expressed in percentage. This provides a tradeoff\nbetween accuracy and compute cost. A lower value for the error indicates higher accuracy and a\nhigher compute cost (NDV means the number of distinct values). It only affects the FM-Sketch\n(not the HLL algorithm, which is the default), where it determines the number of bitvectors\nnecessary to achieve the accuracy.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_NDV_ERROR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_ESTIMATORS_ENABLE", 596, "hive.stats.estimators.enable", v, "Estimators are able to provide more accurate column statistics for UDF results.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_ESTIMATORS_ENABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_STATS_NDV_TUNER", 597, "hive.metastore.stats.ndv.tuner", v, "Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \nThe lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\nIts value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_STATS_NDV_TUNER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION", 598, "hive.metastore.stats.ndv.densityfunction", v, "Whether to use a density function to estimate the NDV for the whole table based on the NDV of the partitions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_STATS_KEY_PREFIX", 599, "hive.stats.key.prefix", "", "", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_KEY_PREFIX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_MAX_VARIABLE_LENGTH", 600, "hive.stats.max.variable.length", v, "To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),\naverage row size is multiplied with the total number of rows coming out of each operator.\nAverage row size is computed from average column size of all columns in the row. In the absence\nof column statistics, for variable length columns (like string, bytes etc.), this value will be\nused. For fixed length columns, their corresponding Java equivalent sizes are used\n(float - 4 bytes, double - 8 bytes etc.).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAX_VARIABLE_LENGTH> = v;
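// --- Example (editor's sketch) --------------------------------------------------
// Worked arithmetic for the row-size estimate described above: fixed-length columns
// use their Java sizes, variable-length ones fall back to
// hive.stats.max.variable.length (default 100). Numbers are hypothetical.
public class RowSizeEstimateSketch {
    public static void main(String[] args) {
        long rowCount = 10_000L;
        int fixedBytes = 4 + 8;            // one float (4) + one double (8)
        int variableBytes = 2 * 100;       // two string columns at the 100-byte fallback
        long estimatedDataSize = rowCount * (fixedBytes + variableBytes);
        System.out.println(estimatedDataSize); // 2120000 bytes through the operator
    }
}
// -------------------------------------------------------------------------------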
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_LIST_NUM_ENTRIES", 601, "hive.stats.list.num.entries", v, "To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),\naverage row size is multiplied with the total number of rows coming out of each operator.\nAverage row size is computed from average column size of all columns in the row. In the absence\nof column statistics and for variable length complex columns like list, the average number of\nentries/values can be specified using this config.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_LIST_NUM_ENTRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_MAP_NUM_ENTRIES", 602, "hive.stats.map.num.entries", v, "To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),\naverage row size is multiplied with the total number of rows coming out of each operator.\nAverage row size is computed from average column size of all columns in the row. In the absence\nof column statistics and for variable length complex columns like map, the average number of\nentries/values can be specified using this config.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAP_NUM_ENTRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_FETCH_COLUMN_STATS", 603, "hive.stats.fetch.column.stats", v, "Annotation of operator tree with statistics information requires column statistics.\nColumn statistics are fetched from metastore. Fetching column statistics for each needed column\ncan be expensive when the number of columns is high. This flag can be used to disable fetching\nof column statistics from metastore.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_FETCH_COLUMN_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.1F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_JOIN_FACTOR", 604, "hive.stats.join.factor", v, "Hive/Tez optimizer estimates the data size flowing through each of the operators. JOIN operator\nuses column statistics to estimate the number of rows flowing out of it and hence the data size.\nIn the absence of column statistics, this factor determines the amount of rows that flows out\nof JOIN operator.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_JOIN_FACTOR> = v;
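// --- Example (editor's sketch) --------------------------------------------------
// Illustrates the role of hive.stats.join.factor only; Hive's real estimator lives
// in the optimizer and is more involved, so treat this as a toy model.
public class JoinFactorSketch {
    public static void main(String[] args) {
        long leftRows = 100_000L, rightRows = 50_000L;
        float joinFactor = 1.1F;       // default of hive.stats.join.factor
        // Toy fallback: scale the larger input by the factor when column stats are absent.
        long estimatedOut = (long) (Math.max(leftRows, rightRows) * joinFactor);
        System.out.println(estimatedOut); // 110000
    }
}
// -------------------------------------------------------------------------------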
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_CORRELATED_MULTI_KEY_JOINS", 605, "hive.stats.correlated.multi.key.joins", v, "When estimating output rows for a join involving multiple columns, the default behavior assumes the columns are independent. Setting this flag to true will cause the estimator to assume the columns are correlated.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_CORRELATED_MULTI_KEY_JOINS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_RANGE_SELECTIVITY_UNIFORM_DISTRIBUTION", 606, "hive.stats.filter.range.uniform", v, "When estimating output rows from a condition, if a range predicate is applied over a column and the\nminimum and maximum values for that column are available, assume a uniform distribution of values\nacross that range and scale the number of rows proportionally. If this is set to false, the default\nselectivity value is used.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_RANGE_SELECTIVITY_UNIFORM_DISTRIBUTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(10.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_DESERIALIZATION_FACTOR", 607, "hive.stats.deserialization.factor", v, "Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence\nof basic statistics like number of rows and data size, file size is used to estimate the number\nof rows and data size. Since files in tables/partitions are serialized (and optionally\ncompressed) the estimates of number of rows and data size cannot be reliably determined.\nThis factor is multiplied with the file size to account for serialization and compression.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_DESERIALIZATION_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_IN_CLAUSE_FACTOR", 608, "hive.stats.filter.in.factor", v, "Currently column distribution is assumed to be uniform. This can lead to overestimation/underestimation\nin the number of rows filtered by a certain operator, which in turn might lead to overprovision or\nunderprovision of resources. This factor is applied to the cardinality estimation of IN clauses in\nfilter operators.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_IN_CLAUSE_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_IN_MIN_RATIO", 609, "hive.stats.filter.in.min.ratio", v, "Output estimation of an IN filter can\'t be lower than this ratio");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_IN_MIN_RATIO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_UDTF_FACTOR", 610, "hive.stats.udtf.factor", v, "UDTFs change the number of rows of the output. A common UDTF is the explode() method that creates\nmultiple rows for each element in the input array. This factor is applied to the number of\noutput rows and output size.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_UDTF_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_USE_BITVECTORS", 611, "hive.stats.use.bitvectors", v, "Enables the use of bitvectors for estimating selectivity.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_USE_BITVECTORS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(10000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STATS_MAX_NUM_STATS", 612, "hive.stats.max.num.stats", v, "When the number of stats to be updated is huge, this value is used to control the number of\nstats to be sent to HMS for update.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STATS_MAX_NUM_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(-1L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(2147483647L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>(java.lang.Long,boolean,java.lang.Long,boolean)>(v, 1, v, 1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_THRIFT_CLIENT_MAX_MESSAGE_SIZE", 613, "hive.thrift.client.max.message.size", "1gb", v, "Thrift client configuration for max message size. 0 or -1 will use the default defined in the Thrift library. The upper limit is 2147483647 bytes (just under 2gb).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_THRIFT_CLIENT_MAX_MESSAGE_SIZE> = v;
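// --- Example (editor's sketch) --------------------------------------------------
// The config is a size string bounded by the validator above. Reading it back in
// bytes via HiveConf.getSizeVar is assumed here from memory of the HiveConf API;
// setVar is definitely real.
import org.apache.hadoop.hive.conf.HiveConf;

public class ThriftMaxMessageSizeExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.HIVE_THRIFT_CLIENT_MAX_MESSAGE_SIZE, "512mb");
        long bytes = conf.getSizeVar(HiveConf.ConfVars.HIVE_THRIFT_CLIENT_MAX_MESSAGE_SIZE);
        System.out.println(bytes); // 536870912
    }
}
// -------------------------------------------------------------------------------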
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SUPPORT_CONCURRENCY", 614, "hive.support.concurrency", v, "Whether Hive supports concurrency control or not.\nA ZooKeeper instance must be up and running when using the ZooKeeper Hive lock manager.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SUPPORT_CONCURRENCY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOCK_MANAGER", 615, "hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_MANAGER> = v;
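// --- Example (editor's sketch) --------------------------------------------------
// The ZooKeeper lock manager set above only takes effect when concurrency support
// is on and hive.zookeeper.quorum (defined further down) is reachable; setBoolVar
// and setVar are real HiveConf methods.
import org.apache.hadoop.hive.conf.HiveConf;

public class ZkLockManagerExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
        conf.setVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER,
            "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager");
    }
}
// -------------------------------------------------------------------------------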
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOCK_NUMRETRIES", 616, "hive.lock.numretries", v, "The number of times you want to try to get all the locks");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_NUMRETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_UNLOCK_NUMRETRIES", 617, "hive.unlock.numretries", v, "The number of times you want to retry a single unlock.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_UNLOCK_NUMRETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(9223372036854775807L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit,java.lang.Long,boolean,java.lang.Long,boolean)>(v, v, 0, v, 0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_LOCK_SLEEP_BETWEEN_RETRIES", 618, "hive.lock.sleep.between.retries", "60s", v, "The maximum sleep time between various retries");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_SLEEP_BETWEEN_RETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOCK_MAPRED_ONLY", 619, "hive.lock.mapred.only.operation", v, "This parameter controls whether locks are acquired only for queries\nthat need to execute at least one mapred job.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_MAPRED_ONLY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOCK_QUERY_STRING_MAX_LENGTH", 620, "hive.lock.query.string.max.length", v, "The maximum length of the query string to store in the lock.\nThe default value is 1000000, since the data limit of a znode is 1MB");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_QUERY_STRING_MAX_LENGTH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MM_ALLOW_ORIGINALS", 621, "hive.mm.allow.originals", v, "Whether to allow original files in MM tables. Conversion to MM may be expensive if\nthis is set to false; however, unless the MAPREDUCE-7086 fix is present (hadoop 3.1.1+),\nqueries that read non-orc MM tables with original files will fail. The default in\nHive 3.0 is false.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MM_ALLOW_ORIGINALS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "dp";
v[2] = "all";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_LOCK_FILE_MOVE_MODE", 622, "hive.lock.file.move.protect", "all", v, "During file move operations, acquires a SEMI_SHARED lock at the table level. none: never; dp: only for dynamic partitioning operations; all: all table operations.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCK_FILE_MOVE_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_USE_KERBEROS", 623, "hive.zookeeper.kerberos.enabled", v, "Whether ZooKeeper is configured for Kerberos authentication. This could be useful when the cluster\nis kerberized but ZooKeeper is not.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_USE_KERBEROS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_QUORUM", 624, "hive.zookeeper.quorum", "", "List of ZooKeeper servers to talk to. This is needed for: \n. Read/write locks - when hive.lock.manager is set to \norg.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager, \n. When HiveServer supports service discovery via Zookeeper.\n. For delegation token storage if zookeeper store is used, if\nhive.cluster.delegation.token.store.zookeeper.connectString is not set\n. LLAP daemon registry service\n. Leader selection for privilege synchronizer");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_QUORUM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_CLIENT_PORT", 625, "hive.zookeeper.client.port", "2181", "The port of ZooKeeper servers to talk to.\nIf the list of Zookeeper servers specified in hive.zookeeper.quorum\ndoes not contain port numbers, this value is used.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CLIENT_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_ZOOKEEPER_SESSION_TIMEOUT", 626, "hive.zookeeper.session.timeout", "120000ms", v, "ZooKeeper client\'s session timeout (in milliseconds). The client is disconnected, and as a result all locks are released,\nif a heartbeat is not sent within the timeout.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SESSION_TIMEOUT> = v;
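// --- Example (editor's sketch) --------------------------------------------------
// Time-typed configs such as the session timeout are read back in a caller-chosen
// unit; getTimeVar(ConfVars, TimeUnit) is a real HiveConf method.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class ZkSessionTimeoutExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        long ms = conf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT,
                                  TimeUnit.MILLISECONDS);
        System.out.println(ms); // 120000 by default
    }
}
// -------------------------------------------------------------------------------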
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_ZOOKEEPER_CONNECTION_TIMEOUT", 627, "hive.zookeeper.connection.timeout", "15s", v, "ZooKeeper client\'s connection timeout in seconds. Connection timeout * hive.zookeeper.connection.max.retries\nwith exponential backoff is when the Curator client deems the connection to ZooKeeper lost.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_NAMESPACE", 628, "hive.zookeeper.namespace", "hive_zookeeper_namespace", "The parent node under which all ZooKeeper nodes are created.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_NAMESPACE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES", 629, "hive.zookeeper.clean.extra.nodes", v, "Clean extra nodes at the end of the session.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(3);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES", 630, "hive.zookeeper.connection.max.retries", v, "Max number of times to retry when connecting to the ZooKeeper server.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME", 631, "hive.zookeeper.connection.basesleeptime", "1000ms", v, "Initial amount of time (in milliseconds) to wait between retries\nwhen connecting to the ZooKeeper server when using ExponentialBackoffRetry policy.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME> = v;
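// --- Example (editor's sketch) --------------------------------------------------
// Maps basesleeptime and connection.max.retries onto Curator's retry policy.
// ExponentialBackoffRetry(baseSleepTimeMs, maxRetries) and newClient(...) are real
// Curator APIs; the connect string is a placeholder.
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class ZkRetryPolicyExample {
    public static void main(String[] args) {
        RetryPolicy retry = new ExponentialBackoffRetry(1000, 3); // 1000ms base, 3 retries
        CuratorFramework client =
            CuratorFrameworkFactory.newClient("zk1:2181,zk2:2181,zk3:2181", retry);
        client.start();
        client.close();
    }
}
// -------------------------------------------------------------------------------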
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_SSL_ENABLE", 632, "hive.zookeeper.ssl.client.enable", v, "Set client to use TLS when connecting to ZooKeeper.  An explicit value overrides any value set via the zookeeper.client.secure system property (note the different name).  Defaults to false if neither is set.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_ENABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_SSL_KEYSTORE_LOCATION", 633, "hive.zookeeper.ssl.keystore.location", "", "Keystore location when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.keyStore.location system property (note the camelCase).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_KEYSTORE_LOCATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_SSL_KEYSTORE_PASSWORD", 634, "hive.zookeeper.ssl.keystore.password", "", "Keystore password when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.keyStore.password system property (note the camelCase).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_KEYSTORE_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_SSL_TRUSTSTORE_LOCATION", 635, "hive.zookeeper.ssl.truststore.location", "", "Truststore location when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.trustStore.location system property (note the camelCase).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_TRUSTSTORE_LOCATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD", 636, "hive.zookeeper.ssl.truststore.password", "", "Truststore password when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.trustStore.password system property (note the camelCase).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_SSL_TRUSTSTORE_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_KILLQUERY_ENABLE", 637, "hive.zookeeper.killquery.enable", v, "Whether kill query coordination with ZooKeeper is enabled, when hive.server2.support.dynamic.service.discovery is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_KILLQUERY_ENABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ZOOKEEPER_KILLQUERY_NAMESPACE", 638, "hive.zookeeper.killquery.namespace", "killQueries", "When kill query coordination is enabled, uses this namespace for registering queries to kill with zookeeper");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ZOOKEEPER_KILLQUERY_NAMESPACE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_MANAGER", 639, "hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", "Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\ntransactions, which also requires appropriate settings for hive.compactor.initiator.on,\nhive.compactor.worker.threads, hive.support.concurrency (true),\nand hive.exec.dynamic.partition.mode (nonstrict).\nThe default DummyTxnManager replicates pre-Hive-0.13 behavior and provides\nno transactions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MANAGER> = v;
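// --- Example (editor's sketch) --------------------------------------------------
// Pulls together the companion settings the description above lists for turning on
// Hive transactions; the property names come from this same file, and conf.set is
// the generic Configuration setter.
import org.apache.hadoop.hive.conf.HiveConf;

public class AcidTxnManagerExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER,
            "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
        conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
        conf.set("hive.exec.dynamic.partition.mode", "nonstrict");
        // hive.compactor.initiator.on / hive.compactor.worker.threads must also be
        // set on the metastore side, per the description above.
    }
}
// -------------------------------------------------------------------------------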
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_EXT_LOCKING_ENABLED", 640, "hive.txn.ext.locking.enabled", v, "When enabled use standard R/W lock semantics based on hive.txn.strict.locking.mode for external resources,\ne.g. INSERT will acquire lock based on hive.txn.strict.locking.mode\n(exclusive if it is true, shared if that is false),\nSELECT will acquire shared lock based on hive.txn.nonacid.read.locks.\nWhen disabled no locks are acquired for external resources.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_EXT_LOCKING_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_STRICT_LOCKING_MODE", 641, "hive.txn.strict.locking.mode", v, "In strict mode non-ACID\nresources use standard R/W lock semantics, e.g. INSERT will acquire exclusive lock.\nIn nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\nallows two concurrent writes to the same partition but still lets the lock manager prevent\nDROP TABLE etc. when the table is being written to.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_STRICT_LOCKING_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_NONACID_READ_LOCKS", 642, "hive.txn.nonacid.read.locks", v, "Flag to turn off the read locks for non-ACID tables, when set to false.\nCould be exercised to improve the performance of non-ACID tables in clusters where read locking is enabled globally to support ACID. Can cause issues with concurrent DDL operations, or slow S3 writes.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_NONACID_READ_LOCKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_READ_LOCKS", 643, "hive.txn.read.locks", v, "Flag to turn off the read locks, when set to false. Although it is not recommended,\nin performance-critical scenarios this option may be exercised.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_READ_LOCKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(-1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOCKS_PARTITION_THRESHOLD", 644, "hive.locks.max.partitions", v, "Locks the entire table if the number of partition locks exceeds the user-defined threshold. Disabled by default.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCKS_PARTITION_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TXN_OVERWRITE_X_LOCK", 645, "hive.txn.xlock.iow", v, "Ensures commands with OVERWRITE (such as INSERT OVERWRITE) acquire Exclusive locks for\ntransactional tables. This ensures that inserts (w/o overwrite) running concurrently\nare not hidden by the INSERT OVERWRITE.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_OVERWRITE_X_LOCK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TXN_MERGE_INSERT_X_LOCK", 646, "hive.txn.xlock.mergeinsert", v, "Ensures MERGE INSERT operations acquire EXCLUSIVE / EXCL_WRITE lock for transactional tables.\nIf enabled, prevents duplicates when MERGE statements are executed in parallel transactions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_MERGE_INSERT_X_LOCK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TXN_WRITE_X_LOCK", 647, "hive.txn.xlock.write", v, "Manages concurrency levels for ACID resources. Provides a better level of query parallelism by enabling shared writes and write-write conflict resolution at the commit step.\n- If true - exclusive writes are used:\n  - INSERT OVERWRITE acquires EXCLUSIVE locks\n  - UPDATE/DELETE acquire EXCL_WRITE locks\n  - INSERT acquires SHARED_READ locks\n- If false - shared writes, transaction is aborted in case of conflicting changes:\n  - INSERT OVERWRITE acquires EXCL_WRITE locks\n  - INSERT/UPDATE/DELETE acquire SHARED_READ locks");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_WRITE_X_LOCK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_STATS_ENABLED", 648, "hive.txn.stats.enabled", v, "Whether Hive supports transactional stats (accurate stats for transactional tables)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_STATS_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(120);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_ACID_DIR_CACHE_DURATION", 649, "hive.txn.acid.dir.cache.duration", v, "Enable dir cache for ACID tables, specified in minutes. 0 indicates the cache is used as read-only and no additional info would be populated. -1 means the cache is disabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_DIR_CACHE_DURATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_WRITE_ACID_VERSION_FILE", 650, "hive.txn.write.acid.version.file", v, "Creates an _orc_acid_version file along with acid files, to store the version data");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_WRITE_ACID_VERSION_FILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_READONLY_ENABLED", 651, "hive.txn.readonly.enabled", v, "Enables read-only transaction classification and related optimizations");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_READONLY_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ACID_LOCKLESS_READS_ENABLED", 652, "hive.acid.lockless.reads.enabled", v, "Enables lockless reads");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_LOCKLESS_READS_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ACID_CREATE_TABLE_USE_SUFFIX", 653, "hive.acid.createtable.softdelete", v, "Enables non-blocking DROP TABLE operation.\nIf enabled, every table directory would be suffixed with the corresponding table creation txnId.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_CREATE_TABLE_USE_SUFFIX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ACID_TRUNCATE_USE_BASE", 654, "hive.acid.truncate.usebase", v, "If enabled, truncate for transactional tables will not delete the data directories,\nrather create a new base directory with no datafiles.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_TRUNCATE_USE_BASE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ACID_DROP_PARTITION_USE_BASE", 655, "hive.acid.droppartition.usebase", v, "Enables non-blocking DROP PARTITION operation.\nIf enabled, drop for transactional tables will not delete the data directories,\nrather create a new base directory with no datafiles.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_DROP_PARTITION_USE_BASE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ACID_RENAME_PARTITION_MAKE_COPY", 656, "hive.acid.renamepartition.makecopy", v, "Enables non-blocking RENAME PARTITION operation.\nIf enabled, rename for transactional tables will not rename the partition directory,\nrather create a copy of it under the new path.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_RENAME_PARTITION_MAKE_COPY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(500);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_TXN_ACID_METRICS_MAX_CACHE_SIZE", 657, "hive.txn.acid.metrics.max.cache.size", v, v, "Size of the ACID metrics cache, i.e. max number of partitions and unpartitioned tables with the most deltas that will be included in the lists of active, obsolete and small deltas. Allowed range is 0 to 500.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_MAX_CACHE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_TXN_ACID_METRICS_REPORTING_INTERVAL", 658, "hive.txn.acid.metrics.reporting.interval", "30s", v, "Reporting period for ACID metrics in seconds.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_REPORTING_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_ACID_METRICS_DELTA_NUM_THRESHOLD", 659, "hive.txn.acid.metrics.delta.num.threshold", v, "The minimum number of active delta files a table/partition must have in order to be included in the ACID metrics report.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_DELTA_NUM_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_ACID_METRICS_OBSOLETE_DELTA_NUM_THRESHOLD", 660, "hive.txn.acid.metrics.obsolete.delta.num.threshold", v, "The minimum number of obsolete delta files a table/partition must have in order to be included in the ACID metrics report.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_OBSOLETE_DELTA_NUM_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.01F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_ACID_METRICS_DELTA_PCT_THRESHOLD", 661, "hive.txn.acid.metrics.delta.pct.threshold", v, "Percentage (fractional) size of the delta files relative to the base directory. Deltas smaller than this threshold count as small deltas. Default 0.01 = 1%.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_ACID_METRICS_DELTA_PCT_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_TXN_TIMEOUT", 662, "hive.txn.timeout", "300s", v, "time after which transactions are declared aborted if the client has not sent a heartbeat.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(5);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE", 663, "hive.txn.heartbeat.threadpool.size", v, "The number of threads to use for heartbeating. For Hive CLI, 1 is enough. For HiveServer2, we need a few.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT", 664, "hive.txn.manager.dump.lock.state.on.acquire.timeout", v, "Set this to true so that when attempt to acquire a lock on resource times out, the current state of the lock manager is dumped to log file.  This is for debugging.  See also hive.lock.numretries and hive.lock.sleep.between.retries.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_TXN_OPERATIONAL_PROPERTIES", 665, "hive.txn.operational.properties", v, "1: Enable split-update feature found in the newer version of Hive ACID subsystem\n4: Make the table \'quarter-acid\' as it only supports insert. But it doesn\'t require ORC or bucketing.\nThis is intended to be used as an internal property for future versions of ACID. (See\nHIVE-14035 for details. Users set it via the transactional_properties key in tblproperties.)", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_OPERATIONAL_PROPERTIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MAX_OPEN_TXNS", 666, "hive.max.open.txns", v, "Maximum number of open transactions. If \ncurrent open transactions reach this limit, future open transaction requests will be \nrejected, until this number goes below the limit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAX_OPEN_TXNS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COUNT_OPEN_TXNS_INTERVAL", 667, "hive.count.open.txns.interval", "1s", v, "Time in seconds between checks to count open transactions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COUNT_OPEN_TXNS_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_MAX_OPEN_BATCH", 668, "hive.txn.max.open.batch", v, "Maximum number of transactions that can be fetched in one call to open_txns().\nThis controls how many transactions streaming agents such as Flume or Storm open\nsimultaneously. The streaming agent then writes that number of entries into a single\nfile (per Flume agent or Storm bolt). Thus increasing this value decreases the number\nof delta files created by streaming agents. But it also increases the number of open\ntransactions that Hive has to track at any given time, which may negatively affect\nread performance.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MAX_OPEN_BATCH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TXN_RETRYABLE_SQLEX_REGEX", 669, "hive.txn.retryable.sqlex.regex", "", "Comma-separated list\nof regular expression patterns for the SQL state, error code, and error message of\nretryable SQLExceptions, suitable for the metastore DB.\nFor example: Can\'t serialize.*,40001$,^Deadlock,.*ORA-08176.*\nThe string that the regex will be matched against is of the following form, where ex is a SQLException:\nex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\"");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_RETRYABLE_SQLEX_REGEX> = v;
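// Sketch of how the match string described above can be assembled and tested; the helper
// name isRetryable is hypothetical, but the target-string format is taken verbatim from
// the hive.txn.retryable.sqlex.regex description. Assumes patterns contain no commas.
//
//   import java.sql.SQLException;
//   import java.util.regex.Pattern;
//
//   static boolean isRetryable(SQLException ex, String commaSeparatedRegexes) {
//     String target = ex.getMessage() + " (SQLState=" + ex.getSQLState()
//         + ", ErrorCode=" + ex.getErrorCode() + ")";
//     for (String regex : commaSeparatedRegexes.split(",")) {
//       if (!regex.isEmpty() && Pattern.compile(regex).matcher(target).find()) {
//         return true;
//       }
//     }
//     return false;
//   }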
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_INITIATOR_ON", 670, "hive.compactor.initiator.on", v, "Whether to run the initiator and cleaner threads on this metastore instance or not.\nSet this to true on one instance of the Thrift metastore service as part of turning\non Hive transactions. For a complete list of parameters required for turning on\ntransactions, see hive.txn.manager.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_INITIATOR_ON> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_WORKER_THREADS", 671, "hive.compactor.worker.threads", v, "How many compactor worker threads to run on this metastore instance. Set this to a\npositive number on one or more instances of the Thrift metastore service as part of\nturning on Hive transactions. For a complete list of parameters required for turning\non transactions, see hive.txn.manager.\nWorker threads spawn MapReduce jobs to do compactions. They do not do the compactions\nthemselves. Increasing the number of worker threads will decrease the time it takes\ntables or partitions to be compacted once they are determined to need compaction.\nIt will also increase the background load on the Hadoop cluster as more MapReduce jobs\nwill be running in the background.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WORKER_THREADS> = v;
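// Illustrative setup sketch (assumes HiveConf's setBoolVar/setIntVar accessors): per the
// two descriptions above, transactions need the initiator/cleaner enabled on one metastore
// instance and a positive worker count on at least one instance.
//
//   import org.apache.hadoop.hive.conf.HiveConf;
//
//   HiveConf conf = new HiveConf();
//   conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON, true);
//   conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS, 4); // default is 0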
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COMPACTOR_WORKER_TIMEOUT", 672, "hive.compactor.worker.timeout", "86400s", v, "Time in seconds after which a compaction job will be declared failed and the\ncompaction re-queued.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WORKER_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COMPACTOR_CHECK_INTERVAL", 673, "hive.compactor.check.interval", "300s", v, "Time in seconds between checks to see if any tables or partitions need to be\ncompacted. This should be kept high because each check for compaction requires\nmany calls against the NameNode.\nDecreasing this value will reduce the time it takes for compaction to be started\nfor a table or partition that requires compaction. However, checking if compaction\nis needed requires several calls to the NameNode for each table or partition that\nhas had a transaction done on it since the last major compaction. So decreasing this\nvalue will increase the load on the NameNode.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CHECK_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COMPACTOR_INITIATOR_DURATION_UPDATE_INTERVAL", 674, "hive.compactor.initiator.duration.update.interval", "60s", v, "Time in seconds that drives the update interval of the compaction_initiator_duration metric.\nA smaller value results in more fine-grained metric updates.\nThis updater can be turned off by setting the value to zero or less,\nin which case the metric is updated only after the initiator completes a cycle.\nhive.compactor.initiator.on must be set to true to enable the Initiator,\notherwise this setting has no effect.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_INITIATOR_DURATION_UPDATE_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COMPACTOR_CLEANER_DURATION_UPDATE_INTERVAL", 675, "hive.compactor.cleaner.duration.update.interval", "60s", v, "Time in seconds that drives the update interval of the compaction_cleaner_duration metric.\nA smaller value results in more fine-grained metric updates.\nThis updater can be turned off by setting the value to zero or less,\nin which case the metric is updated only after the cleaner completes a cycle.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_DURATION_UPDATE_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_REQUEST_QUEUE", 676, "hive.compactor.request.queue", v, "Enables parallelization of the checkForCompaction operation, which includes many file metadata checks\nand may be expensive.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_REQUEST_QUEUE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_DELTA_NUM_THRESHOLD", 677, "hive.compactor.delta.num.threshold", v, "Number of delta directories in a table or partition that will trigger a minor\ncompaction.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELTA_NUM_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.1F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_DELTA_PCT_THRESHOLD", 678, "hive.compactor.delta.pct.threshold", v, "Percentage (fractional) size of the delta files relative to the base that will trigger\na major compaction. (1.0 = 100%, so the default 0.1 = 10%.)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELTA_PCT_THRESHOLD> = v;
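// Worked example of the two triggers above (a sketch of the semantics, not the Initiator's
// actual code): with the defaults, 10 or more delta directories suggest a minor compaction,
// and deltas totalling more than 10% of the base size suggest a major one.
//
//   static boolean wantsMinor(int deltaDirCount, int numThreshold /* default 10 */) {
//     return deltaDirCount >= numThreshold;
//   }
//
//   static boolean wantsMajor(long deltaBytes, long baseBytes, float pctThreshold /* default 0.1f */) {
//     return baseBytes > 0 && (float) deltaBytes / baseBytes > pctThreshold;
//   }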
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(500);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("COMPACTOR_MAX_NUM_DELTA", 679, "hive.compactor.max.num.delta", v, "Maximum number of delta files that the compactor will attempt to handle in a single job.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_MAX_NUM_DELTA> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD", 680, "hive.compactor.abortedtxn.threshold", v, "Number of aborted transactions involving a given table or partition that will trigger\na major compaction.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit HOURS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COMPACTOR_ABORTEDTXN_TIME_THRESHOLD", 681, "hive.compactor.aborted.txn.time.threshold", "12h", v, "Age of a table/partition\'s oldest aborted transaction at which compaction will be triggered. Default time unit: hours. Set to a negative number to disable.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ABORTEDTXN_TIME_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(200);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_ACTIVE_DELTA_DIR_THRESHOLD", 682, "hive.compactor.active.delta.dir.threshold", v, "If the number of active delta directories under a table/partition passes this threshold, a warning message will be logged.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ACTIVE_DELTA_DIR_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(200);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_OBSOLETE_DELTA_DIR_THRESHOLD", 683, "hive.compactor.obsolete.delta.dir.threshold", v, "If the number of obsolete delta directories under a table/partition passes this threshold, a warning message will be logged.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_OBSOLETE_DELTA_DIR_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(200);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_SMALL_DELTA_DIR_THRESHOLD", 684, "hive.compactor.small.delta.dir.threshold", v, "If the number of small delta directories under a table/partition passes this threshold, a warning message will be logged.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_SMALL_DELTA_DIR_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MINUTES>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COMPACTOR_ACID_METRICS_LOGGER_FREQUENCY", 685, "hive.compactor.acid.metrics.logger.frequency", "360m", v, "Logging frequency of ACID related metrics. Set this value to 0 to completely turn off logging. Default time unit: minutes");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_ACID_METRICS_LOGGER_FREQUENCY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(300000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_WAIT_TIMEOUT", 686, "hive.compactor.wait.timeout", v, "Timeout in milliseconds for blocking compaction. Its value has to be higher than 2000 milliseconds.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_WAIT_TIMEOUT> = v;
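// Sketch of a guard for the constraint stated above (hypothetical validation code; the
// listing only documents the 2000 ms lower bound):
//
//   import org.apache.hadoop.hive.conf.HiveConf;
//
//   HiveConf conf = new HiveConf();
//   long waitMs = conf.getLongVar(HiveConf.ConfVars.HIVE_COMPACTOR_WAIT_TIMEOUT);
//   if (waitMs <= 2000L) {
//     throw new IllegalArgumentException("hive.compactor.wait.timeout must be higher than 2000 ms");
//   }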
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MR_COMPACTOR_GATHER_STATS", 687, "hive.mr.compactor.gather.stats", v, "If set to true, MAJOR compaction will gather stats if there are stats already associated with the table/partition.\nTurn this off to save some resources if the stats are not used anyway.\nWorks only for MR based compaction; CRUD based compaction uses hive.stats.autogather.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MR_COMPACTOR_GATHER_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_GATHER_STATS", 688, "hive.compactor.gather.stats", v, "If set to true, MAJOR compaction will gather stats if there are stats already associated with the table/partition.\nTurn this off to save some resources if the stats are not used anyway.\nThis is a replacement for the HIVE_MR_COMPACTOR_GATHER_STATS config, and works for both MR and query-based compaction.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_GATHER_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(20);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("COMPACTOR_INITIATOR_FAILED_THRESHOLD", 689, "hive.compactor.initiator.failed.compacts.threshold", v, v, "Number of consecutive compaction failures (per table/partition) after which automatic compactions will not be scheduled any more.  Note that this must be less than hive.compactor.history.retention.failed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_INITIATOR_FAILED_THRESHOLD> = v;
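// Sketch of the documented invariant (hypothetical check, based only on the note above):
// the failed-compacts threshold must stay below hive.compactor.history.retention.failed,
// presumably so enough failure records survive the history reaper to be counted.
//
//   import org.apache.hadoop.hive.conf.HiveConf;
//
//   HiveConf conf = new HiveConf();
//   int failedThreshold = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
//   int retainedFailed = conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
//   if (failedThreshold >= retainedFailed) {
//     throw new IllegalStateException(
//         "hive.compactor.initiator.failed.compacts.threshold must be < hive.compactor.history.retention.failed");
//   }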
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COMPACTOR_CLEANER_RUN_INTERVAL", 690, "hive.compactor.cleaner.run.interval", "5000ms", v, "Time between runs of the cleaner thread");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_RUN_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED", 691, "hive.compactor.delayed.cleanup.enabled", v, "When enabled, cleanup of obsolete files/dirs after compaction can be delayed. The delay\ncan be configured via hive.compactor.cleaner.retention.time.seconds.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_COMPACTOR_CLEANER_RETENTION_TIME", 692, "hive.compactor.cleaner.retention.time.seconds", "300s", v, "Time to wait before cleanup of obsolete files/dirs after compaction.\nThis is the minimum amount of time the system will wait, since it will not clean until all transactions\nthat were open before the compaction have been committed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_RETENTION_TIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_CLEANER_THREADS_NUM", 693, "hive.compactor.cleaner.threads.num", v, "Enables parallelization of cleaning directories after compaction, which includes many file-related checks\nand may be expensive.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_CLEANER_THREADS_NUM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("COMPACTOR_JOB_QUEUE", 694, "hive.compactor.job.queue", "", "Used to specify the name of the Hadoop queue to which\ncompaction jobs will be submitted. Set to an empty string to let Hadoop choose the queue.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_JOB_QUEUE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TRANSACTIONAL_CONCATENATE_NOBLOCK", 695, "hive.transactional.concatenate.noblock", v, "Will cause \'alter table T concatenate\' to be non-blocking");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TRANSACTIONAL_CONCATENATE_NOBLOCK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("CONCATENATE_EXTERNAL_TABLE", 696, "hive.concatenate.external.table", v, "Enable concatenate for external tables. This allows \'alter table `tablename` concatenate\' on external tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CONCATENATE_EXTERNAL_TABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPACTOR_COMPACT_MM", 697, "hive.compactor.compact.insert.only", v, "Whether the compactor should compact insert-only tables. A safety switch.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPACTOR_COMPACT_MM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("COMPACTOR_CRUD_QUERY_BASED", 698, "hive.compactor.crud.query.based", v, "If true, compaction on full CRUD tables is done via queries. Compaction on insert-only tables always runs via queries regardless of the value of this configuration.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_CRUD_QUERY_BASED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "query";
v[1] = "compactor";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("SPLIT_GROUPING_MODE", 699, "hive.split.grouping.mode", "query", v, "This is set to compactor from within the query based compactor. This enables the Tez SplitGrouper to group splits based on their bucket number, so that all rows from different bucket files for the same bucket number can end up in the same bucket file after the compaction.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SPLIT_GROUPING_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(3);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("COMPACTOR_HISTORY_RETENTION_SUCCEEDED", 700, "hive.compactor.history.retention.succeeded", v, v, "Determines how many successful compaction records will be retained in compaction history for a given table/partition.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_SUCCEEDED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(3);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("COMPACTOR_HISTORY_RETENTION_FAILED", 701, "hive.compactor.history.retention.failed", v, v, "Determines how many failed compaction records will be retained in compaction history for a given table/partition.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_FAILED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("COMPACTOR_HISTORY_RETENTION_ATTEMPTED", 702, "hive.compactor.history.retention.attempted", v, v, "Determines how many attempted compaction records will be retained in compaction history for a given table/partition.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_RETENTION_ATTEMPTED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("COMPACTOR_HISTORY_REAPER_INTERVAL", 703, "hive.compactor.history.reaper.interval", "2m", v, "Determines how often the compaction history reaper runs.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars COMPACTOR_HISTORY_REAPER_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_TIMEDOUT_TXN_REAPER_INTERVAL", 704, "hive.timedout.txn.reaper.interval", "180s", v, "Time interval describing how often the reaper runs");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TIMEDOUT_TXN_REAPER_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("WRITE_SET_REAPER_INTERVAL", 705, "hive.writeset.reaper.interval", "60s", v, "Frequency of WriteSet reaper runs");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars WRITE_SET_REAPER_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("MERGE_CARDINALITY_VIOLATION_CHECK", 706, "hive.merge.cardinality.check", v, "Set to true to ensure that each SQL Merge statement has, for each row in the target\ntable, at most 1 matching row in the source table, per the SQL Specification.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MERGE_CARDINALITY_VIOLATION_CHECK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SPLIT_UPDATE", 707, "hive.split.update", v, "If true, SQL Update statements will be rewritten to a multi-insert with 2 branches:\none representing the delete of the existing row and one the insert of the new version of the row.\nSimilarly, Merge statements will handle WHEN MATCHED UPDATE by splitting it into 2\nbranches of a multi-insert. Updating bucketing and partitioning columns should\nonly be permitted if this is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SPLIT_UPDATE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("MERGE_SPLIT_UPDATE", 708, "hive.merge.split.update", v, "If true, SQL Merge statements will handle WHEN MATCHED UPDATE by splitting it into 2\nbranches of a multi-insert, representing a delete of the existing row and an insert of\nthe new version of the row. Updating bucketing and partitioning columns should\nonly be permitted if this is true.\nDeprecated; use hive.split.update instead.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars MERGE_SPLIT_UPDATE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("OPTIMIZE_ACID_META_COLUMNS", 709, "hive.optimize.acid.meta.columns", v, "If true, don\'t decode Acid metadata columns from storage unless they are needed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars OPTIMIZE_ACID_META_COLUMNS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(9223372036854775807L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ARROW_ROOT_ALLOCATOR_LIMIT", 710, "hive.arrow.root.allocator.limit", v, "Arrow root allocator memory size limitation in bytes.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_ROOT_ALLOCATOR_LIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(10000000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ARROW_BATCH_ALLOCATOR_LIMIT", 711, "hive.arrow.batch.allocator.limit", v, "Max bytes per arrow batch. This is a threshold, the memory is not pre-allocated.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_BATCH_ALLOCATOR_LIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ARROW_BATCH_SIZE", 712, "hive.arrow.batch.size", v, "The number of rows sent in one Arrow batch.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ARROW_BATCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$PatternSet;
v = newarray (java.lang.String)[7];
v[0] = "YEAR";
v[1] = "MONTH";
v[2] = "WEEK";
v[3] = "DAY";
v[4] = "HOUR";
v[5] = "MINUTE";
v[6] = "SECOND";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$PatternSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_DRUID_INDEXING_GRANULARITY", 713, "hive.druid.indexer.segments.granularity", "DAY", v, "Granularity for the segments created by the Druid storage handler");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_INDEXING_GRANULARITY> = v;
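// Illustrative sketch: the PatternSet above restricts the granularity to the seven listed
// values; setVar/getVar are HiveConf's generic string accessors (the validator is applied
// when Hive verifies a setting, e.g. via SET, rather than by setVar itself).
//
//   import org.apache.hadoop.hive.conf.HiveConf;
//
//   HiveConf conf = new HiveConf();
//   conf.setVar(HiveConf.ConfVars.HIVE_DRUID_INDEXING_GRANULARITY, "HOUR");
//   String granularity = conf.getVar(HiveConf.ConfVars.HIVE_DRUID_INDEXING_GRANULARITY);
//   // one of: YEAR, MONTH, WEEK, DAY, HOUR, MINUTE, SECOND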
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(5000000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_MAX_PARTITION_SIZE", 714, "hive.druid.indexer.partition.size.max", v, "Maximum number of records per segment partition");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_PARTITION_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(75000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_MAX_ROW_IN_MEMORY", 715, "hive.druid.indexer.memory.rownum.max", v, "Maximum number of records in memory while storing data in Druid");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_ROW_IN_MEMORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_BROKER_DEFAULT_ADDRESS", 716, "hive.druid.broker.address.default", "localhost:8082", "Address of the Druid broker. If we are querying Druid from Hive, this address needs to be\ndeclared.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BROKER_DEFAULT_ADDRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS", 717, "hive.druid.coordinator.address.default", "localhost:8081", "Address of the Druid coordinator. It is used to check the load status of newly created segments");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS", 718, "hive.druid.overlord.address.default", "localhost:8090", "Address of the Druid overlord. It is used to submit indexing tasks to druid.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_SELECT_THRESHOLD", 719, "hive.druid.select.threshold", v, "Only takes effect when hive.druid.select.distribute is set to false.\nWhen we can split a Select query, this is the maximum number of rows that we try to retrieve\nper query. In order to do that, we obtain the estimated size for the complete result. If the\nnumber of records of the query results is larger than this threshold, we split the query in\ntotal number of rows/threshold parts across the time dimension. Note that we assume the\nrecords to be split uniformly across the time dimension.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_SELECT_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(20);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_NUM_HTTP_CONNECTION", 720, "hive.druid.http.numConnection", v, "Number of connections used by\nthe HTTP client.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_NUM_HTTP_CONNECTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_HTTP_READ_TIMEOUT", 721, "hive.druid.http.read.timeout", "PT1M", "Read timeout period for the HTTP\nclient in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 1 minute.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_HTTP_READ_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_SLEEP_TIME", 722, "hive.druid.sleep.time", "PT10S", "Sleep time between retries in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 10 seconds.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_SLEEP_TIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_BASE_PERSIST_DIRECTORY", 723, "hive.druid.basePersistDirectory", "", "Local temporary directory used to persist intermediate indexing state; if empty, defaults to the JVM system property java.io.tmpdir.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BASE_PERSIST_DIRECTORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_ROLLUP", 724, "hive.druid.rollup", v, "Whether to roll up Druid rows or not.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_ROLLUP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DRUID_SEGMENT_DIRECTORY", 725, "hive.druid.storage.storageDirectory", "/druid/segments", "Druid deep storage location.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_SEGMENT_DIRECTORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DRUID_METADATA_BASE", 726, "hive.druid.metadata.base", "druid", "Default prefix for metadata tables");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_BASE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$PatternSet;
v = newarray (java.lang.String)[3];
v[0] = "mysql";
v[1] = "postgresql";
v[2] = "derby";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$PatternSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("DRUID_METADATA_DB_TYPE", 727, "hive.druid.metadata.db.type", "mysql", v, "Type of the metadata database.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_TYPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DRUID_METADATA_DB_USERNAME", 728, "hive.druid.metadata.username", "", "Username used to connect to the metadata DB.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_USERNAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DRUID_METADATA_DB_PASSWORD", 729, "hive.druid.metadata.password", "", "Password used to connect to the metadata DB.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DRUID_METADATA_DB_URI", 730, "hive.druid.metadata.uri", "", "URI to connect to the database (for example jdbc:mysql://hostname:port/DBName).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_URI> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("DRUID_WORKING_DIR", 731, "hive.druid.working.directory", "/tmp/workingDirectory", "Default hdfs working directory used to store some intermediate metadata");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_WORKING_DIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(5);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_MAX_TRIES", 732, "hive.druid.maxTries", v, "Maximum number of retries before giving up");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_MAX_TRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(30000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_PASSIVE_WAIT_TIME", 733, "hive.druid.passiveWaitTimeMs", v, "Wait time in ms; defaults to 30 seconds.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_PASSIVE_WAIT_TIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$PatternSet;
v = newarray (java.lang.String)[2];
v[0] = "roaring";
v[1] = "concise";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$PatternSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_DRUID_BITMAP_FACTORY_TYPE", 734, "hive.druid.bitmap.type", "roaring", v, "Coding algorithm used to encode the bitmaps.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_BITMAP_FACTORY_TYPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRUID_KERBEROS_ENABLE", 735, "hive.druid.kerberos.enable", v, "Enable/Disable Kerberos authentication explicitly while connecting to a druid cluster.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRUID_KERBEROS_ENABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_HBASE_WAL_ENABLED", 736, "hive.hbase.wal.enabled", v, "Whether writes to HBase should be forced to the write-ahead log. \nDisabling this improves HBase write performance at the risk of lost writes in case of a crash.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_WAL_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_HBASE_GENERATE_HFILES", 737, "hive.hbase.generatehfiles", v, "True when HBaseStorageHandler should generate hfiles instead of operating against the online table.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_GENERATE_HFILES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_HBASE_SNAPSHOT_NAME", 738, "hive.hbase.snapshot.name", null, "The HBase table snapshot name to use.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_SNAPSHOT_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_HBASE_SNAPSHOT_RESTORE_DIR", 739, "hive.hbase.snapshot.restoredir", "/tmp", "The directory in which to restore the HBase table snapshot.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HBASE_SNAPSHOT_RESTORE_DIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SECURITY_HBASE_URLENCODE_AUTHORIZATION_URI", 740, "hive.security.hbase.urlencode.authorization.uri", v, "When true, it URL-encodes the URI generated by HBaseStorageHandler for authorization. The URI consists of the HBase table name, column family, etc., and may contain characters that need encoding, such as #. If set to true, the corresponding Ranger policies need to be in URL-encoded format too.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_HBASE_URLENCODE_AUTHORIZATION_URI> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_KUDU_MASTER_ADDRESSES_DEFAULT", 741, "hive.kudu.master.addresses.default", "localhost:7050", "Comma-separated list of all of the Kudu master addresses.\nThis value is only used for a given table if the kudu.master_addresses table property is not set.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_KUDU_MASTER_ADDRESSES_DEFAULT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEARCHIVEENABLED", 742, "hive.archive.enabled", v, "Whether archiving operations are permitted");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEARCHIVEENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "minimal";
v[2] = "more";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVEFETCHTASKCONVERSION", 743, "hive.fetch.task.conversion", "more", v, "Some select queries can be converted to single FETCH task minimizing latency.\nCurrently the query should be single sourced not having any subquery and should not have\nany aggregations or distincts (which incurs RS), lateral views and joins.\n. none : disable hive.fetch.task.conversion\n. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n. more    : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCONVERSION> = v;
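// Illustrative sketch of switching the fetch-task conversion mode programmatically (the
// session-level equivalent is SET hive.fetch.task.conversion=<mode>):
//
//   import org.apache.hadoop.hive.conf.HiveConf;
//
//   HiveConf conf = new HiveConf();
//   // "none" disables conversion; "minimal" covers SELECT *, partition-column filters and
//   // LIMIT; "more" (the default) also covers projections, filters, TABLESAMPLE and
//   // virtual columns
//   conf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "minimal");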
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEFETCHTASKCACHING", 744, "hive.fetch.task.caching", v, "Enabling the caching of the result of fetch tasks eliminates the chance of running into a failing read. On the other hand, if enabled, hive.fetch.task.conversion.threshold must be adjusted accordingly. It is 1GB by default, and must be lowered when caching is enabled to prevent consuming too much memory.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCACHING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1073741824L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEFETCHTASKCONVERSIONTHRESHOLD", 745, "hive.fetch.task.conversion.threshold", v, "Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\nis calculated by summation of file lengths. If it\'s not native, storage handler for the table\ncan optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKCONVERSIONTHRESHOLD> = v;
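Aside: the caching flag and the 1 GiB threshold above interact; the hive.fetch.task.caching description recommends lowering the threshold when caching is enabled. A hedged sketch of that adjustment (the 256 MiB cap is an illustrative choice, not a shipped default):

import org.apache.hadoop.hive.conf.HiveConf;

public class FetchThresholdSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        boolean caching = conf.getBoolVar(HiveConf.ConfVars.HIVEFETCHTASKCACHING);
        long threshold = conf.getLongVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSIONTHRESHOLD);
        if (caching && threshold >= 1073741824L) {
            // Illustrative cap: keep cached fetch results well under the
            // default 1 GiB input bound to limit memory consumption.
            conf.setLongVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSIONTHRESHOLD, 256L * 1024 * 1024);
        }
    }
}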
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEFETCHTASKAGGR", 746, "hive.fetch.task.aggr", v, "Aggregation queries with no group-by clause (for example, select count(*) from src) execute\nfinal aggregations in single reduce task. If this is set true, Hive delegates final aggregation\nstage to fetch task, possibly decreasing the query time.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHTASKAGGR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTIMIZEMETADATAQUERIES", 747, "hive.compute.query.using.stats", v, "When set to true Hive will answer a few queries like count(1) purely using stats\nstored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.\nFor more advanced stats collection need to run analyze table queries.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTIMIZEMETADATAQUERIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEFETCHOUTPUTSERDE", 748, "hive.fetch.output.serde", "org.apache.hadoop.hive.serde.DelimitedJSONSerDe", "The SerDe used by FetchTask to serialize the fetch output.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEFETCHOUTPUTSERDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEEXPREVALUATIONCACHE", 749, "hive.cache.expr.evaluation", v, "If true, the evaluation result of a deterministic expression referenced twice or more\nwill be cached.\nFor example, in a filter condition like \'.. where key + 10 = 100 or key + 10 = 0\'\nthe expression \'key + 10\' will be evaluated/cached once and reused for the following\nexpression (\'key + 10 = 0\'). Currently, this is applied only to expressions in select\nor filter operators.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXPREVALUATIONCACHE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEVARIABLESUBSTITUTE", 750, "hive.variable.substitute", v, "This enables substitution using syntax like ${var} ${system:var} and ${env:var}.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEVARIABLESUBSTITUTE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(40);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEVARIABLESUBSTITUTEDEPTH", 751, "hive.variable.substitute.depth", v, "The maximum replacements the substitution engine will do.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEVARIABLESUBSTITUTEDEPTH> = v;
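Aside: hive.variable.substitute and its depth limit work as a pair; substitution rewrites ${...} references and the depth bounds how many chained replacements are attempted. A small sketch, assuming the standard HiveConf accessors:

import org.apache.hadoop.hive.conf.HiveConf;

public class VariableSubstitutionSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(HiveConf.ConfVars.HIVEVARIABLESUBSTITUTE, true);
        // Substitution applies to forms like ${var}, ${system:var} and
        // ${env:var}; chains such as a -> ${b} -> ${c} stop after this many
        // rounds, which also breaks accidental self-references.
        int depth = conf.getIntVar(HiveConf.ConfVars.HIVEVARIABLESUBSTITUTEDEPTH);
        System.out.println("max substitution rounds: " + depth);
    }
}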
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECONFVALIDATION", 752, "hive.conf.validation", v, "Enables type checking for registered Hive configurations");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECONFVALIDATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SEMANTIC_ANALYZER_HOOK", 753, "hive.semantic.analyzer.hook", "", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SEMANTIC_ANALYZER_HOOK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE", 754, "hive.test.authz.sstd.hs2.mode", v, "test hs2 mode from .q tests", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_ENABLED", 755, "hive.security.authorization.enabled", v, "enable or disable the Hive client authorization");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_KERBEROS_USE_SHORTNAME", 756, "hive.security.authorization.kerberos.use.shortname", v, "use short name in Kerberos cluster");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_KERBEROS_USE_SHORTNAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_MANAGER", 757, "hive.security.authorization.manager", "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory", "The Hive client authorization manager class name. The user defined authorization class should implement \ninterface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_MANAGER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHENTICATOR_MANAGER", 758, "hive.security.authenticator.manager", "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator", "hive client authenticator manager class name. The user defined authenticator should implement \ninterface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHENTICATOR_MANAGER> = v;
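Aside: enabling SQL-standard authorization means flipping the enabled flag and pointing the manager/authenticator pair at compatible implementations. A minimal sketch using the default factory registered above; SessionStateUserAuthenticator is the authenticator commonly paired with it, an assumption here rather than anything this dump shows:

import org.apache.hadoop.hive.conf.HiveConf;

public class SqlStdAuthSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
        // Default factory from the HIVE_AUTHORIZATION_MANAGER registration above.
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
            "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
        // Assumed pairing: resolve the user from the HS2 session state.
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
            "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator");
    }
}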
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_AUTHORIZATION_MANAGER", 759, "hive.security.metastore.authorization.manager", "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider", "Names of authorization manager classes (comma separated) to be used in the metastore\nfor authorization. The user defined authorization class should implement interface\norg.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.\nAll authorization manager classes have to successfully authorize the metastore API\ncall for the command execution to be allowed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHORIZATION_MANAGER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_AUTHORIZATION_AUTH_READS", 760, "hive.security.metastore.authorization.auth.reads", v, "If this is true, metastore authorizer authorizes read actions on database, table");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHORIZATION_AUTH_READS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METASTORE_AUTHENTICATOR_MANAGER", 761, "hive.security.metastore.authenticator.manager", "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", "authenticator manager class name to be used in the metastore for authentication. \nThe user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METASTORE_AUTHENTICATOR_MANAGER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_TABLE_USER_GRANTS", 762, "hive.security.authorization.createtable.user.grants", "", "the privileges automatically granted to some users whenever a table gets created.\nAn example like \"userX,userY:select;userZ:create\" will grant select privilege to userX and userY,\nand grant create privilege to userZ whenever a new table is created.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_USER_GRANTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS", 763, "hive.security.authorization.createtable.group.grants", "", "the privileges automatically granted to some groups whenever a table gets created.\nAn example like \"groupX,groupY:select;groupZ:create\" will grant select privilege to groupX and groupY,\nand grant create privilege to groupZ whenever a new table is created.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS", 764, "hive.security.authorization.createtable.role.grants", "", "the privileges automatically granted to some roles whenever a table gets created.\nAn example like \"roleX,roleY:select;roleZ:create\" will grant select privilege to roleX and roleY,\nand grant create privilege to roleZ whenever a new table is created.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS", 765, "hive.security.authorization.createtable.owner.grants", "", "The privileges automatically granted to the owner whenever a table gets created.\nAn example like \"select,drop\" will grant select and drop privilege to the owner\nof the table. Note that the default gives the creator of a table no access to the\ntable (but see HIVE-8067).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS> = v;
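Aside: the createtable grant settings above share the "name[,name...]:priv[;...]" grammar, while the owner variant takes a bare privilege list. A sketch with hypothetical principals:

import org.apache.hadoop.hive.conf.HiveConf;

public class CreateTableGrantsSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Hypothetical users, following the grammar from the descriptions above.
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_USER_GRANTS,
            "userX,userY:select;userZ:create");
        // Bare privilege list; without it the creator gets no access to the
        // new table (see HIVE-8067).
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS,
            "select,drop");
    }
}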
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_TASK_FACTORY", 766, "hive.security.authorization.task.factory", "org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl", "Authorization DDL task factory implementation");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TASK_FACTORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_TABLES_ON_STORAGEHANDLERS", 767, "hive.security.authorization.tables.on.storagehandlers", v, "Enables authorization on tables with custom storage handlers as implemented by HIVE-24705. Default setting is true. Useful for turning the feature off if the corresponding ranger patch is missing.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_TABLES_ON_STORAGEHANDLERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST", 768, "hive.security.authorization.sqlstd.confwhitelist", "", "A Java regex. Configuration parameters that match this\nregex can be modified by the user when SQL standard authorization is enabled.\nTo get the default value, use the \'set <param>\' command.\nNote that the hive.conf.restricted.list checks are still enforced after the whitelist\ncheck");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND", 769, "hive.security.authorization.sqlstd.confwhitelist.append", "", "A second Java regex that is matched in addition to\nhive.security.authorization.sqlstd.confwhitelist.\nDo not include a starting \"|\" in the value. Using this regex instead\nof updating the original regex means that you can append to the default\nset by SQL standard authorization instead of replacing it entirely.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND> = v;
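Aside: the append variant exists so deployments can whitelist their own parameters without restating the built-in regex. A sketch with a hypothetical parameter namespace; note the value carries no leading "|":

import org.apache.hadoop.hive.conf.HiveConf;

public class ConfWhitelistAppendSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // "myapp.*" is hypothetical; HiveConf ORs this regex onto the default
        // whitelist itself, so no starting "|" belongs in the value.
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND,
            "myapp\\.query\\..*|myapp\\.ui\\..*");
    }
}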
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CLI_PRINT_HEADER", 770, "hive.cli.print.header", v, "Whether to print the names of the columns in query output.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_PRINT_HEADER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CLI_PRINT_ESCAPE_CRLF", 771, "hive.cli.print.escape.crlf", v, "Whether to print carriage returns and line feeds in row output as escaped \\r and \\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_PRINT_ESCAPE_CRLF> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CLI_TEZ_SESSION_ASYNC", 772, "hive.cli.tez.session.async", v, "Whether to start Tez\nsession in background when running CLI with Tez, allowing CLI to be available earlier.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CLI_TEZ_SESSION_ASYNC> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DISABLE_UNSAFE_EXTERNALTABLE_OPERATIONS", 773, "hive.disable.unsafe.external.table.operations", v, "Whether to disable certain optimizations and operations on external tables, on the assumption that data changes by external applications may have negative effects on these operations.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISABLE_UNSAFE_EXTERNALTABLE_OPERATIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_STRICT_MANAGED_TABLES", 774, "hive.strict.managed.tables", v, "Whether strict managed tables mode is enabled. With this mode enabled, only transactional tables (both full and insert-only) are allowed to be created as managed tables");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_STRICT_MANAGED_TABLES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_EXTERNALTABLE_PURGE_DEFAULT", 775, "hive.external.table.purge.default", v, "Set to true to set external.table.purge=true on newly created external tables, which will specify that the table data should be deleted when the table is dropped. Set to false to maintain the existing behavior that external tables do not delete data when the table is dropped.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXTERNALTABLE_PURGE_DEFAULT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ERROR_ON_EMPTY_PARTITION", 776, "hive.error.on.empty.partition", v, "Whether to throw an exception if dynamic partition insert generates empty results.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ERROR_ON_EMPTY_PARTITION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_EXIM_URI_SCHEME_WL", 777, "hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a,gs", "A comma separated list of acceptable URI schemes for import and export.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXIM_URI_SCHEME_WL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES", 778, "hive.exim.strict.repl.tables", v, "Parameter that determines if \'regular\' (non-replication) export dumps can be\nimported onto tables that are the target of replication. If this parameter is\nset, regular imports will check if the destination table (if it exists) has a \'repl.last.id\' set on it. If so, it will fail.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_REPL_TASK_FACTORY", 779, "hive.repl.task.factory", "org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory", "Parameter that can be used to override which ReplicationTaskFactory will be\nused to instantiate ReplicationTask events. Override for third party repl plugins");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REPL_TASK_FACTORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("REPL_FILTER_TRANSACTIONS", 780, "hive.repl.filter.transactions", v, "Enable transaction event filtering to save dump space.\nWhen true, transactions are implicitly opened during REPL DUMP.\nThe default setting is false");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars REPL_FILTER_TRANSACTIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS", 781, "hive.mapper.cannot.span.multiple.partitions", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_REWORK_MAPREDWORK", 782, "hive.rework.mapredwork", v, "should rework the mapred work or not.\nThis is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_REWORK_MAPREDWORK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_IO_EXCEPTION_HANDLERS", 783, "hive.io.exception.handlers", "", "A list of io exception handler class names. This is used\nto construct a list of exception handlers to handle exceptions thrown\nby record readers");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_IO_EXCEPTION_HANDLERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOG4J_FILE", 784, "hive.log4j.file", "", "Hive log4j configuration file.\nIf the property is not set, then logging will be initialized using hive-log4j.properties found on the classpath.\nIf the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \nwhich you can then extract a URL from and pass to PropertyConfigurator.configure(URL).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG4J_FILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_EXEC_LOG4J_FILE", 785, "hive.exec.log4j.file", "", "Hive log4j configuration file for execution mode (sub command).\nIf the property is not set, then logging will be initialized using hive-exec-log4j.properties found on the classpath.\nIf the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \nwhich you can then extract a URL from and pass to PropertyConfigurator.configure(URL).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_LOG4J_FILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ASYNC_LOG_ENABLED", 786, "hive.async.log.enabled", v, "Whether to enable Log4j2\'s asynchronous logging. Asynchronous logging can give\n significant performance improvement as logging will be handled in separate thread\n that uses LMAX disruptor queue for buffering log messages.\n Refer https://logging.apache.org/log4j/2.x/manual/async.html for benefits and\n drawbacks.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_LOG_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOG_EXPLAIN_OUTPUT", 787, "hive.log.explain.output", v, "Whether to log explain output for every query.\nWhen enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOG_EXPLAIN_OUTPUT_TO_CONSOLE", 788, "hive.log.explain.output.to.console", v, "Whether to make output from hive.log.explain.output log to the console instead of the normal logger");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT_TO_CONSOLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOG_EXPLAIN_OUTPUT_INCLUDE_EXTENDED", 789, "hive.log.explain.output.include.extended", v, "Whether to include extended details in the explain output printed from hive.log.explain.output");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_EXPLAIN_OUTPUT_INCLUDE_EXTENDED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_EXPLAIN_USER", 790, "hive.explain.user", v, "Whether to show explain result at user level.\nWhen enabled, will log EXPLAIN output for the query at user level. Tez only.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXPLAIN_USER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL", 791, "hive.autogen.columnalias.prefix.label", "_c", "String used as a prefix when auto generating column alias.\nBy default the prefix label will be appended with a column position number to form the column alias. \nAuto generation would happen if an aggregate function is used in a select clause without an explicit alias.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME", 792, "hive.autogen.columnalias.prefix.includefuncname", v, "Whether to include function name in the column alias auto generated by Hive.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics";
v[1] = "org.apache.hadoop.hive.common.metrics.LegacyMetrics";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_METRICS_CLASS", 793, "hive.service.metrics.class", "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics", v, "Hive metrics subsystem implementation class.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_CLASS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CODAHALE_METRICS_REPORTER_CLASSES", 794, "hive.service.metrics.codahale.reporter.classes", "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter", "Comma separated list of reporter implementation classes for metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides HIVE_METRICS_REPORTER conf if present");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CODAHALE_METRICS_REPORTER_CLASSES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METRICS_REPORTER", 795, "hive.service.metrics.reporter", "", "Reporter implementations for metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This configuration will be overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES if present. Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_REPORTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METRICS_JSON_FILE_LOCATION", 796, "hive.service.metrics.file.location", "/tmp/report.json", "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file. This file will get overwritten at every interval.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_JSON_FILE_LOCATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_METRICS_JSON_FILE_INTERVAL", 797, "hive.service.metrics.file.frequency", "5000ms", v, "For metric class org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, the frequency of updating JSON metrics file.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_JSON_FILE_INTERVAL> = v;
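Aside: TimeValidator-backed vars such as the interval above accept suffixed durations ("5000ms", "30s") and are read back in whatever unit the caller requests. A sketch, assuming the standard time accessors:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class MetricsIntervalSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // "5000ms" parses to 5000 when asked for milliseconds.
        long millis = conf.getTimeVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, TimeUnit.MILLISECONDS);
        System.out.println("JSON metrics file rewritten every " + millis + " ms");
        // Writing goes through the same time parsing and validation.
        conf.setTimeVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, 30, TimeUnit.SECONDS);
    }
}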
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_METRICS_HADOOP2_INTERVAL", 798, "hive.service.metrics.hadoop.frequency", "30s", v, "For metric class org.apache.hadoop.hive.common.metrics.metrics2.Metrics2Reporter, the frequency of updating the HADOOP2 metrics system.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_HADOOP2_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_METRICS_HADOOP2_COMPONENT_NAME", 799, "hive.service.metrics.hadoop.component", "hive", "Component name to provide to Hadoop Metrics system. Ideally \'hivemetastore\' for the MetaStore and \'hiveserver2\' for HiveServer2.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_METRICS_HADOOP2_COMPONENT_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PERF_LOGGER", 800, "hive.exec.perf.logger", "org.apache.hadoop.hive.ql.log.PerfLogger", "The class responsible for logging client side performance metrics. \nMust be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PERF_LOGGER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_START_CLEANUP_SCRATCHDIR", 801, "hive.start.cleanup.scratchdir", v, "To clean up the Hive scratchdir when starting the Hive Server");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_START_CLEANUP_SCRATCHDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SCRATCH_DIR_LOCK", 802, "hive.scratchdir.lock", v, "To hold a lock file in scratchdir to prevent it from being removed by cleardanglingscratchdir");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCRATCH_DIR_LOCK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_INSERT_INTO_MULTILEVEL_DIRS", 803, "hive.insert.into.multilevel.dirs", v, "Whether to insert into multilevel directories like\n\"insert directory \'/HIVEFT25686/china/\' from table\"");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INSERT_INTO_MULTILEVEL_DIRS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CTAS_EXTERNAL_TABLES", 804, "hive.ctas.external.tables", v, "whether CTAS for external tables is allowed");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CTAS_EXTERNAL_TABLES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_INSERT_INTO_EXTERNAL_TABLES", 805, "hive.insert.into.external.tables", v, "whether insert into external tables is allowed");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INSERT_INTO_EXTERNAL_TABLES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "memory";
v[1] = "ssd";
v[2] = "default";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_TEMPORARY_TABLE_STORAGE", 806, "hive.exec.temporary.table.storage", "default", v, "Define the storage policy for temporary tables. Choices between memory, ssd and default");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEMPORARY_TABLE_STORAGE> = v;
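Aside: the storage policy value maps temporary-table scratch data onto the corresponding HDFS storage policy, and the StringSet above admits exactly memory, ssd and default. A one-line sketch:

import org.apache.hadoop.hive.conf.HiveConf;

public class TempTableStorageSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // "ssd" chosen for illustration; "memory" and "default" are the
        // other accepted values.
        conf.setVar(HiveConf.ConfVars.HIVE_TEMPORARY_TABLE_STORAGE, "ssd");
    }
}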
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_LIFETIME_HOOKS", 807, "hive.query.lifetime.hooks", "", "A comma separated list of hooks which implement QueryLifeTimeHook. These will be triggered before/after query compilation and before/after query execution, in the order specified. Implementations of QueryLifeTimeHookWithParseHooks can also be specified in this list. If they are specified then they will be invoked in the same places as QueryLifeTimeHooks and will be invoked during pre and post query parsing");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_LIFETIME_HOOKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DRIVER_RUN_HOOKS", 808, "hive.exec.driver.run.hooks", "", "A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning and end of Driver.run, these will be run in the order specified.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DRIVER_RUN_HOOKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DDL_OUTPUT_FORMAT", 809, "hive.ddl.output.format", null, "The data format to use for DDL output.  One of \"text\" (for human\nreadable text) or \"json\" (for a json object).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DDL_OUTPUT_FORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ENTITY_SEPARATOR", 810, "hive.entity.separator", "@", "Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ENTITY_SEPARATOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CAPTURE_TRANSFORM_ENTITY", 811, "hive.entity.capture.transform", v, "Whether the compiler captures the transform URI referred to in the query");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CAPTURE_TRANSFORM_ENTITY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY", 812, "hive.display.partition.cols.separately", v, "In older Hive version (0.10 and earlier) no distinction was made between\npartition columns or non-partition columns while displaying columns in describe\ntable. From 0.12 onwards, they are displayed separately. This flag will let you\nget old behavior, if desired. See, test-case in patch for HIVE-6689.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LINEAGE_INFO", 813, "hive.lineage.hook.info.enabled", v, "Whether Hive provides lineage information to hooks.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LINEAGE_INFO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SSL_PROTOCOL_BLACKLIST", 814, "hive.ssl.protocol.blacklist", "SSLv2,SSLv3", "SSL Versions to disable for all Hive Servers");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SSL_PROTOCOL_BLACKLIST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PRIVILEGE_SYNCHRONIZER", 815, "hive.privilege.synchronizer", v, "Whether to synchronize privileges from external authorizer periodically in HS2");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PRIVILEGE_SYNCHRONIZER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL", 816, "hive.privilege.synchronizer.interval", "1800s", v, "Interval to synchronize privileges from external authorizer periodically in HS2");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR", 817, "hive.server2.clear.dangling.scratchdir", v, "Clear dangling scratch dir periodically in HS2");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL", 818, "hive.server2.clear.dangling.scratchdir.interval", "1800s", v, "Interval to clear dangling scratch dir periodically in HS2");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(9223372036854775807L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit,java.lang.Long,boolean,java.lang.Long,boolean)>(v, v, 1, v, 1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS", 819, "hive.server2.sleep.interval.between.start.attempts", "60s", v, "Amount of time to sleep between HiveServer2 start attempts. Primarily meant for tests");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(30L);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, null);
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String)>(v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Number of times HiveServer2 will attempt to start before exiting. The sleep interval between retries is determined by \u0001\n The default of 30 will keep trying for 30 minutes.");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_MAX_START_ATTEMPTS", 820, "hive.server2.max.start.attempts", v, v, v);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MAX_START_ATTEMPTS> = v;
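Aside: the retry budget is the product of the two vars above; at the defaults, 30 attempts at a 60s sleep is the quoted 30 minutes. A sketch that recomputes it through the standard accessors:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;

public class StartAttemptBudgetSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        long attempts = conf.getLongVar(HiveConf.ConfVars.HIVE_SERVER2_MAX_START_ATTEMPTS);
        long sleepMs = conf.getTimeVar(
            HiveConf.ConfVars.HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS, TimeUnit.MILLISECONDS);
        // Defaults: 30 * 60000 ms = 1,800,000 ms = 30 minutes of retrying.
        System.out.println("total retry budget: " + (attempts * sleepMs) / 60000L + " minutes");
    }
}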
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY", 821, "hive.server2.support.dynamic.service.discovery", v, "Whether HiveServer2 supports dynamic service discovery for its clients. To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself, when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: hive.zookeeper.quorum in their connection string.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ZOOKEEPER_NAMESPACE", 822, "hive.server2.zookeeper.namespace", "hiveserver2", "The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ZOOKEEPER_NAMESPACE> = v;
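Aside: on the client side, dynamic service discovery surfaces as JDBC URL options: serviceDiscoveryMode=zooKeeper plus a zooKeeperNamespace matching the parent node registered above. A sketch with a hypothetical quorum (the Hive JDBC driver must be on the classpath):

import java.sql.Connection;
import java.sql.DriverManager;

public class ZkDiscoverySketch {
    public static void main(String[] args) throws Exception {
        // zk1..zk3 are hypothetical hosts; the namespace parameter mirrors
        // hive.server2.zookeeper.namespace ("hiveserver2" by default), and the
        // driver picks a live HiveServer2 instance registered under it.
        String url = "jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/default;"
            + "serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("connected to a discovered HiveServer2 instance");
        }
    }
}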
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS", 823, "hive.server2.zookeeper.publish.configs", v, "Whether we should publish HiveServer2\'s configs to ZooKeeper.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TRUSTED_PROXY_TRUSTHEADER", 824, "hive.server2.proxy.trustheader", "", "This config indicates whether the connection was already authenticated before the request lands on HiveServer2, so that we can avoid authenticating again in HS2. The default value is empty; if it is set to some header, say \'X-Trusted-Proxy-Auth-Header\', then we look for this header in the connection string and, if present, directly extract the client name from the header.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_PROXY_TRUSTHEADER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION", 825, "hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}", "Either the location of a HS2 global init file or a directory containing a .hiverc file. If the \nproperty is set, the value must be a valid path to an init file or directory where the init file is located.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = <org.apache.hadoop.hive.conf.HiveServer2TransportMode: org.apache.hadoop.hive.conf.HiveServer2TransportMode binary>;
v = virtualinvoke v.<org.apache.hadoop.hive.conf.HiveServer2TransportMode: java.lang.String toString()>();
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v = <org.apache.hadoop.hive.conf.HiveServer2TransportMode: org.apache.hadoop.hive.conf.HiveServer2TransportMode binary>;
v = virtualinvoke v.<org.apache.hadoop.hive.conf.HiveServer2TransportMode: java.lang.String toString()>();
v[0] = v;
v = <org.apache.hadoop.hive.conf.HiveServer2TransportMode: org.apache.hadoop.hive.conf.HiveServer2TransportMode http>;
v = virtualinvoke v.<org.apache.hadoop.hive.conf.HiveServer2TransportMode: java.lang.String toString()>();
v[1] = v;
v = <org.apache.hadoop.hive.conf.HiveServer2TransportMode: org.apache.hadoop.hive.conf.HiveServer2TransportMode all>;
v = virtualinvoke v.<org.apache.hadoop.hive.conf.HiveServer2TransportMode: java.lang.String toString()>();
v[2] = v;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_TRANSPORT_MODE", 826, "hive.server2.transport.mode", v, v, "Transport mode of HiveServer2.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRANSPORT_MODE> = v;
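Aside: the default above is the binary enum constant stringified, and the validator admits binary/http/all; "all" stands up both endpoints. A sketch branching on the mode:

import org.apache.hadoop.hive.conf.HiveConf;

public class TransportModeSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        String mode = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE);
        // "all" implies both the Thrift binary and the HTTP endpoints.
        boolean binary = mode.equals("binary") || mode.equals("all");
        boolean http = mode.equals("http") || mode.equals("all");
        System.out.println("binary=" + binary + ", http=" + http);
    }
}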
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_BIND_HOST", 827, "hive.server2.thrift.bind.host", "", "Bind host on which to run the HiveServer2 Thrift service.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_BIND_HOST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PARALLEL_COMPILATION", 828, "hive.driver.parallel.compilation", v, "Whether to\nenable parallel compilation of the queries between sessions and within the same session on HiveServer2. The default is false.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_COMPILATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT", 829, "hive.driver.parallel.compilation.global.limit", v, "Determines the degree of parallelism for query compilation between sessions on HiveServer2. The default is -1.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_COMPILATION_LIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_COMPILE_LOCK_TIMEOUT", 830, "hive.server.compile.lock.timeout", "0s", v, "Number of seconds a request will wait to acquire the compile lock before giving up. Setting it to 0s disables the timeout.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_COMPILE_LOCK_TIMEOUT> = v;
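Durations such as the "0s" default above are stored as strings with a unit suffix and checked by Validator$TimeValidator; getTimeVar converts them to whatever TimeUnit the caller requests. A minimal sketch (getTimeVar is a real HiveConf accessor):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class CompileLockTimeout {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // The default "0s" comes back as 0; a value of "5s" would come back as 5000 here.
        long ms = conf.getTimeVar(ConfVars.HIVE_SERVER2_COMPILE_LOCK_TIMEOUT, TimeUnit.MILLISECONDS);
        System.out.println("compile lock timeout = " + ms + " ms");
    }
}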
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PARALLEL_OPS_IN_SESSION", 831, "hive.server.parallel.ops.in.session", v, "Whether to allow several parallel operations (such as SQL statements) in one session.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PARALLEL_OPS_IN_SESSION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "DEFAULT";
v[1] = "DUMMY";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL", 832, "hive.server.materializedviews.registry.impl", "DEFAULT", v, "The implementation that we should use for the materialized views registry. \n  DEFAULT: Default cache for materialized views\n  DUMMY: Do not cache materialized views and hence forward requests to metastore");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_REFRESH", 833, "hive.server.materializedviews.registry.refresh.period", "1500s", v, "Period, specified in seconds, between successive refreshes of the registry to pull new materializations from the metastore that may have been created by other HS2 instances.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_REFRESH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_BIND_HOST", 834, "hive.server.webui.host", "0.0.0.0", "The host address the HiveServer WebUI will listen on");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_BIND_HOST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10002);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_PORT", 835, "hive.server.webui.port", v, "The port the HiveServer WebUI will listen on. This can be set to 0 or a negative integer to disable the web UI.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_PORT> = v;
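Per the description above, a non-positive port disables the web UI entirely. A hedged sketch of turning it off programmatically (setIntVar is a real HiveConf mutator; setting the key in hive-site.xml is the more common route):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class DisableWebUi {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setIntVar(ConfVars.HIVE_SERVER2_WEBUI_PORT, -1); // <= 0 disables the web UI
    }
}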
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(50);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_MAX_THREADS", 836, "hive.server.webui.max.threads", v, "The maximum number of HiveServer WebUI threads.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_USE_SSL", 837, "hive.server.webui.use.ssl", v, "Set this to true for using SSL encryption for HiveServer WebUI.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_SSL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH", 838, "hive.server.webui.keystore.path", "", "SSL certificate keystore location for HiveServer WebUI.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD", 839, "hive.server.webui.keystore.password", "", "SSL certificate keystore password for HiveServer WebUI.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SSL_KEYSTORE_TYPE", 840, "hive.server.webui.keystore.type", "", "SSL certificate keystore type for HiveServer WebUI.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_TYPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SSL_EXCLUDE_CIPHERSUITES", 841, "hive.server.webui.exclude.ciphersuites", "", "A comma separated list of SSL cipher suite names or regular expressions to exclude for the HiveServer WebUI.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_EXCLUDE_CIPHERSUITES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SSL_KEYMANAGERFACTORY_ALGORITHM", 842, "hive.server.webui.keymanagerfactory.algorithm", "", "SSL certificate key manager factory algorithm for HiveServer WebUI.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYMANAGERFACTORY_ALGORITHM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_USE_SPNEGO", 843, "hive.server.webui.use.spnego", v, "If true, the HiveServer WebUI will be secured with SPNEGO. Clients must authenticate with Kerberos.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_SPNEGO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB", 844, "hive.server.webui.spnego.keytab", "", "The path to the Kerberos Keytab file containing the HiveServer WebUI SPNEGO service principal.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL", 845, "hive.server.webui.spnego.principal", "HTTP/_HOST@EXAMPLE.COM", "The HiveServer WebUI SPNEGO service principal.\nThe special string _HOST will be replaced automatically with \nthe value of hive.server.webui.host or the correct host name.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(25);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES", 846, "hive.server.webui.max.historic.queries", v, "The maximum number of past queries to show in the HiveServer WebUI.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_USE_PAM", 847, "hive.server.webui.use.pam", v, "If true, the HiveServer WebUI will be secured with PAM.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_USE_PAM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_EXPLAIN_OUTPUT", 848, "hive.server.webui.explain.output", v, "When set to true, the EXPLAIN output for every query is displayed in the HS2 WebUI / Drilldown / Query Plan tab.\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_EXPLAIN_OUTPUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SHOW_GRAPH", 849, "hive.server.webui.show.graph", v, "Set this to true to display the query plan as a graph instead of text in the WebUI. Only works with hive.server.webui.explain.output set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SHOW_GRAPH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(25);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_MAX_GRAPH_SIZE", 850, "hive.server.webui.max.graph.size", v, "Maximum number of stages the graph can display. If the number of stages exceeds this, no query plan will be shown. Only works when hive.server.webui.show.graph and hive.server.webui.explain.output are set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_MAX_GRAPH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_SHOW_STATS", 851, "hive.server.webui.show.stats", v, "Set this to true to display statistics for MapReduce tasks in the WebUI. Only works when hive.server.webui.show.graph and hive.server.webui.explain.output are set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SHOW_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_ENABLE_CORS", 852, "hive.server.webui.enable.cors", v, "Whether to enable cross origin requests (CORS)\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_ENABLE_CORS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_CORS_ALLOWED_ORIGINS", 853, "hive.server.webui.cors.allowed.origins", "*", "Comma separated list of origins that are allowed when CORS is enabled.\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_ORIGINS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_CORS_ALLOWED_METHODS", 854, "hive.server.webui.cors.allowed.methods", "GET,POST,DELETE,HEAD", "Comma separated list of http methods that are allowed when CORS is enabled.\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_METHODS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_CORS_ALLOWED_HEADERS", 855, "hive.server.webui.cors.allowed.headers", "X-Requested-With,Content-Type,Accept,Origin", "Comma separated list of http headers that are allowed when CORS is enabled.\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_CORS_ALLOWED_HEADERS> = v;
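Taken together, the three CORS entries above gate cross-origin access to the web UI. A hedged sketch of narrowing the permissive defaults before starting HS2 (setBoolVar and setVar are real HiveConf mutators; the origin value is a hypothetical placeholder):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class CorsSetup {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setBoolVar(ConfVars.HIVE_SERVER2_WEBUI_ENABLE_CORS, true);
        // Replace the permissive "*" default with an explicit origin (hypothetical host).
        conf.setVar(ConfVars.HIVE_SERVER2_WEBUI_CORS_ALLOWED_ORIGINS, "https://monitoring.example.com");
        conf.setVar(ConfVars.HIVE_SERVER2_WEBUI_CORS_ALLOWED_METHODS, "GET,HEAD");
    }
}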
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_XFRAME_ENABLED", 856, "hive.server.webui.xframe.enabled", v, "Whether to enable the X-Frame-Options header\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_XFRAME_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WEBUI_XFRAME_VALUE", 857, "hive.server.webui.xframe.value", "SAMEORIGIN", "Configuration to allow the user to set the X-Frame-Options value\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_XFRAME_VALUE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SHOW_OPERATION_DRILLDOWN_LINK", 858, "hive.server.show.operation.drilldown.link", v, "Whether to show the operation\'s drilldown link to thrift client.\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SHOW_OPERATION_DRILLDOWN_LINK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE", 859, "hive.server.active.passive.ha.enable", v, "Whether HiveServer Active/Passive High Availability should be enabled when Hive Interactive sessions are enabled. This will also require hive.server.support.dynamic.service.discovery to be enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ACTIVE_PASSIVE_HA_REGISTRY_NAMESPACE", 860, "hive.server.active.passive.ha.registry.namespace", "hs2ActivePassiveHA", "When HiveServer Active/Passive High Availability is enabled, uses this namespace for registering HS2\ninstances with zookeeper");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ACTIVE_PASSIVE_HA_REGISTRY_NAMESPACE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE", 861, "hive.server.tez.interactive.queue", "", "A single YARN queue to use for Hive Interactive sessions. When this is specified,\nworkload management is enabled and used for these sessions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WM_NAMESPACE", 862, "hive.server.wm.namespace", "default", "The WM namespace to use when one metastore is used by multiple compute clusters each \nwith their own workload management. The special value \'default\' (the default) will \nalso include any resource plans created before the namespaces were introduced.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_NAMESPACE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WM_WORKER_THREADS", 863, "hive.server.wm.worker.threads", v, "Number of worker threads to use to perform the synchronous operations with Tez\nsessions for workload management (e.g. opening, closing, etc.)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_WORKER_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC", 864, "hive.server.wm.allow.any.pool.via.jdbc", v, "Applies when a user specifies a target WM pool in the JDBC connection string. If\nfalse, the user can only specify a pool he is mapped to (e.g. make a choice among\nmultiple group mappings); if true, the user can specify any existing pool.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WM_POOL_METRICS", 865, "hive.server.wm.pool.metrics", v, "Whether per-pool WM metrics should be enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_POOL_METRICS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_TEZ_WM_AM_REGISTRY_TIMEOUT", 866, "hive.server.tez.wm.am.registry.timeout", "30s", v, "The timeout for AM registry registration, after which (on attempting to use the\nsession), we kill it and try to get another one.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_WM_AM_REGISTRY_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_WM_DELAYED_MOVE", 867, "hive.server.wm.delayed.move", v, "Determines behavior of the wm move trigger when destination pool is full.\nIf true, the query will run in source pool as long as possible if destination pool is full;\nif false, the query will be killed if destination pool is full.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_WM_DELAYED_MOVE_TIMEOUT", 868, "hive.server.wm.delayed.move.timeout", "3600", v, "The amount of time a delayed move is allowed to run in the source pool.\nWhen a delayed move session times out, the session is moved to the destination pool.\nA value of 0 indicates no timeout.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE_TIMEOUT> = v;
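The delayed-move timeout is declared with a bare "3600" default but its TimeValidator is bound to SECONDS, so it reads back as one hour. A sketch (getTimeVar is a real HiveConf accessor; the class is illustrative):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class DelayedMoveTimeout {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        long seconds = conf.getTimeVar(ConfVars.HIVE_SERVER2_WM_DELAYED_MOVE_TIMEOUT, TimeUnit.SECONDS);
        System.out.println("delayed move timeout = " + seconds + " s"); // 3600 by default; 0 means no timeout
    }
}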
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_WM_DELAYED_MOVE_VALIDATOR_INTERVAL", 869, "hive.server.wm.delayed.move.validator.interval", "60", v, "Interval for checking for expired delayed moves.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WM_DELAYED_MOVE_VALIDATOR_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TEZ_DEFAULT_QUEUES", 870, "hive.server.tez.default.queues", "", "A list of comma separated values corresponding to YARN queues of the same name.\nWhen HiveServer is launched in Tez mode, this configuration needs to be set\nfor multiple Tez sessions to run in parallel on the cluster.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_DEFAULT_QUEUES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE", 871, "hive.server.tez.sessions.per.default.queue", v, "A positive integer that determines the number of Tez sessions that should be\nlaunched on each of the queues specified by \"hive.server.tez.default.queues\".\nDetermines the parallelism on each queue.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS", 872, "hive.server.tez.initialize.default.sessions", v, "This flag is used in HiveServer to enable a user to use HiveServer without\nturning on Tez for HiveServer. The user could potentially want to run queries\nover Tez without the pool of sessions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TEZ_QUEUE_ACCESS_CHECK", 873, "hive.server.tez.queue.access.check", v, "Whether to check user access to explicitly specified YARN queues. yarn.resourcemanager.webapp.address must be configured to use this.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_QUEUE_ACCESS_CHECK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit HOURS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_TEZ_SESSION_LIFETIME", 874, "hive.server.tez.session.lifetime", "162h", v, "The lifetime of the Tez sessions launched by HS2 when default sessions are enabled.\nSet to 0 to disable session expiration.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_LIFETIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit HOURS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER", 875, "hive.server.tez.session.lifetime.jitter", "3h", v, "The jitter for Tez session lifetime; prevents all the sessions from restarting at once.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(16);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS", 876, "hive.server.tez.sessions.init.threads", v, "If hive.server.tez.initialize.default.sessions is enabled, the maximum number of\nthreads to use to initialize the default sessions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TEZ_SESSION_RESTRICTED_CONFIGS", 877, "hive.server.tez.sessions.restricted.configs", "", "The configuration settings that cannot be set when submitting jobs to HiveServer. If\nany of these are set to values different from those in the server configuration, an\nexception will be thrown.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_RESTRICTED_CONFIGS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "true";
v[1] = "false";
v[2] = "ignore";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED", 878, "hive.server.tez.sessions.custom.queue.allowed", "true", v, "Whether Tez session pool should allow submitting queries to custom queues. The options\nare true, false (error out), ignore (accept the query but ignore the queue setting).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_LOGGING_OPERATION_ENABLED", 879, "hive.server.logging.operation.enabled", v, "When true, HS2 will save operation logs and make them available for clients");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = <java.io.File: java.lang.String separator>;
v = <java.io.File: java.lang.String separator>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,java.lang.String)>(v, v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("${system:java.io.tmpdir}\u0001${system:user.name}\u0001operation_logs");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION", 880, "hive.server.logging.operation.log.location", v, "Top level directory where operation logs are stored if logging functionality is enabled");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION> = v;
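The dynamicinvoke above is the compiled form of an ordinary string concatenation: in the StringConcatFactory recipe, each \u0001 marks a dynamic argument (here File.separator, twice). Its source-level equivalent is roughly the following (the "${system:...}" tokens are resolved later by Hive's own variable substitution, not by this expression):

import java.io.File;

public class OperationLogDefault {
    public static void main(String[] args) {
        String defaultLocation = "${system:java.io.tmpdir}" + File.separator
                               + "${system:user.name}" + File.separator + "operation_logs";
        System.out.println(defaultLocation);
    }
}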
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[4];
v[0] = "NONE";
v[1] = "EXECUTION";
v[2] = "PERFORMANCE";
v[3] = "VERBOSE";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_LOGGING_OPERATION_LEVEL", 881, "hive.server.logging.operation.level", "EXECUTION", v, "HS2 operation logging mode available to clients to be set at session level.\nFor this to work, hive.server.logging.operation.enabled should be set to true.\n  NONE: Ignore any logging\n  EXECUTION: Log completion of tasks\n  PERFORMANCE: Execution + Performance logs \n  VERBOSE: All logs");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LOGGING_OPERATION_LEVEL> = v;
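The operation-log level can be tightened or relaxed per process before HS2 starts. A hedged sketch (setVar is a real HiveConf mutator; the StringSet registered above rejects anything outside NONE/EXECUTION/PERFORMANCE/VERBOSE):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class OperationLogLevel {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.setVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL, "VERBOSE"); // all logs
    }
}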
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_OPERATION_LOG_CLEANUP_DELAY", 882, "hive.server.operation.log.cleanup.delay", "300s", v, "When a query is cancelled (via kill query, query timeout or triggers),\noperation logs get cleaned up after this delay.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_OPERATION_LOG_CLEANUP_DELAY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_OPERATION_LOG_PURGEPOLICY_TIMETOLIVE", 883, "hive.server.operation.log.purgePolicy.timeToLive", "60s", v, "Number of seconds the appender, which has been dynamically created by the Log4J framework for the operation log, should survive without having any events sent to it. For more details, check Log4J\'s IdlePurgePolicy.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_OPERATION_LOG_PURGEPOLICY_TIMETOLIVE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_HISTORIC_OPERATION_LOG_ENABLED", 884, "hive.server.historic.operation.log.enabled", v, "Keep the operation log for some time until the operation\'s query info is evicted from QueryInfoCache.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(3000L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit,java.lang.Long,boolean,java.lang.Long,boolean)>(v, v, 1, null, 0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_HISTORIC_OPERATION_LOG_CHECK_INTERVAL", 885, "hive.server.historic.operation.log.check.interval", "15m", v, "The check interval for cleaning up the historic operation log and session dirs, which should be used only if hive.server.historic.operation.log.enabled is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_CHECK_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(2147483647L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>(java.lang.Long,boolean,java.lang.Long,boolean)>(v, 1, v, 0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_HISTORIC_OPERATION_LOG_FETCH_MAXBYTES", 886, "hive.server.operation.log.fetch.maxBytes", "4Mb", v, "The buffer size for fetching the operation log, which should be used only if hive.server.historic.operation.log.enabled is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_HISTORIC_OPERATION_LOG_FETCH_MAXBYTES> = v;
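The "4Mb" default is a size string bounded by the Validator$SizeValidator above (minimum 1 byte inclusive, maximum Integer.MAX_VALUE exclusive). A hedged sketch, assuming HiveConf exposes a getSizeVar accessor that returns the parsed byte count (this is an assumption about the accessor; falling back to getVar plus manual parsing also works):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class FetchBufferSize {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // "4Mb" parses to 4 * 1024 * 1024 bytes (assumed getSizeVar accessor).
        long bytes = conf.getSizeVar(ConfVars.HIVE_SERVER2_HISTORIC_OPERATION_LOG_FETCH_MAXBYTES);
        System.out.println("fetch buffer = " + bytes + " bytes");
    }
}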
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER", 887, "hive.server.limit.connections.per.user", v, "Maximum hive server connections per user. Any user exceeding this limit will not be allowed to connect. Default=0 does not enforce limits.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_LIMIT_CONNECTIONS_PER_IPADDRESS", 888, "hive.server.limit.connections.per.ipaddress", v, "Maximum hive server connections per ipaddress. Any ipaddress exceeding this limit will not be allowed to connect. Default=0 does not enforce limits.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_IPADDRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER_IPADDRESS", 889, "hive.server.limit.connections.per.user.ipaddress", v, "Maximum hive server connections per user:ipaddress combination. Any user-ipaddress exceeding this limit will not be allowed to connect. Default=0 does not enforce limits.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER_IPADDRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_METRICS_ENABLED", 890, "hive.server.metrics.enabled", v, "Enable metrics on the HiveServer.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_METRICS_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10001);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_PORT", 891, "hive.server.thrift.http.port", v, "Port number of HiveServer Thrift interface when hive.server.transport.mode is \'http\'.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_PATH", 892, "hive.server.thrift.http.path", "cliservice", "Path component of URL endpoint when in HTTP mode.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_PATH> = v;
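With the HTTP transport, clients reach HS2 on the thrift HTTP port and path registered above. A hedged JDBC sketch (transportMode and httpPath are standard Hive JDBC URL options; the host name and credentials are hypothetical, and the hive-jdbc driver must be on the classpath):

import java.sql.Connection;
import java.sql.DriverManager;

public class HttpModeClient {
    public static void main(String[] args) throws Exception {
        // Port 10001 and path "cliservice" are the defaults registered above.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:hive2://hs2-host.example.com:10001/default;transportMode=http;httpPath=cliservice",
                "user", "")) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}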
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(104857600);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE", 893, "hive.server.thrift.max.message.size", v, "Maximum message size in bytes an HS2 server will accept.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME", 894, "hive.server.thrift.http.max.idle.time", "1800s", v, "Maximum idle time for a connection on the server when in HTTP mode.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME", 895, "hive.server.thrift.http.worker.keepalive.time", "60s", v, "Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, excessive threads are killed after this time interval.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(6144);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE", 896, "hive.server.thrift.http.request.header.size", v, "Request header size in bytes, when using HTTP transport mode. Jetty defaults used.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(6144);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE", 897, "hive.server.thrift.http.response.header.size", v, "Response header size in bytes, when using HTTP transport mode. Jetty defaults used.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_COMPRESSION_ENABLED", 898, "hive.server.thrift.http.compression.enabled", v, "Enable thrift http compression via Jetty compression support");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COMPRESSION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED", 899, "hive.server.thrift.http.cookie.auth.enabled", v, "When true, HiveServer in HTTP transport mode, will use cookie based authentication mechanism.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE", 900, "hive.server.thrift.http.cookie.max.age", "86400s", v, "Maximum age in seconds for server side cookie used by HS2 in HTTP mode.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN", 901, "hive.server.thrift.http.cookie.domain", null, "Domain for the HS2 generated cookies");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH", 902, "hive.server.thrift.http.cookie.path", null, "Path for the HS2 generated cookies");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE", 903, "hive.server.thrift.http.cookie.is.secure", v, "Deprecated: Secure attribute of the HS2 generated cookie (this is automatically enabled for SSL enabled HiveServer).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY", 904, "hive.server.thrift.http.cookie.is.httponly", v, "HttpOnly attribute of the HS2 generated cookie.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_PORT", 905, "hive.server.thrift.port", v, "Port number of HiveServer Thrift interface when hive.server.transport.mode is \'binary\'.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "auth";
v[1] = "auth-int";
v[2] = "auth-conf";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_THRIFT_SASL_QOP", 906, "hive.server.thrift.sasl.qop", "auth", v, "Sasl QOP value; set it to one of following values to enable higher levels of\nprotection for HiveServer communication with clients.\nSetting hadoop.rpc.protection to a higher level than HiveServer does not\nmake sense in most situations. HiveServer ignores hadoop.rpc.protection in favor\nof hive.server.thrift.sasl.qop.\n  \"auth\" - authentication only (default)\n  \"auth-int\" - authentication plus integrity protection\n  \"auth-conf\" - authentication plus integrity and confidentiality protection\nThis is applicable only if HiveServer is configured to use Kerberos authentication.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_SASL_QOP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(5);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS", 907, "hive.server.thrift.min.worker.threads", v, "Minimum number of Thrift worker threads");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(500);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS", 908, "hive.server.thrift.max.worker.threads", v, "Maximum number of Thrift worker threads");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH", 909, "hive.server.thrift.exponential.backoff.slot.length", "100ms", v, "Binary exponential backoff slot time for Thrift clients during login to HiveServer,\nfor retries until hitting Thrift client timeout");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT", 910, "hive.server.thrift.login.timeout", "20s", v, "Timeout for Thrift clients during login to HiveServer2");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME", 911, "hive.server.thrift.worker.keepalive.time", "60s", v, "Keepalive time (in seconds) for an idle worker thread. When the number of workers exceeds min workers, excessive threads are killed after this time interval.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ASYNC_EXEC_THREADS", 912, "hive.server.async.exec.threads", v, "Number of threads in the async thread pool for HiveServer2");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT", 913, "hive.server.async.exec.shutdown.timeout", "10s", v, "How long HiveServer shutdown will wait for async threads to terminate.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE", 914, "hive.server.async.exec.wait.queue.size", v, "Size of the wait queue for async thread pool in HiveServer.\nAfter hitting this limit, the async thread pool will reject new requests.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME", 915, "hive.server.async.exec.keepalive.time", "10s", v, "Time that an idle HiveServer async thread (from the thread pool) will wait for a new task\nto arrive before terminating");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME> = v;
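
Taken together, hive.server.async.exec.threads, hive.server.async.exec.wait.queue.size and hive.server.async.exec.keepalive.time describe a bounded thread pool: 100 workers, a 100-slot wait queue that rejects work once full, and a 10s idle timeout. A hedged sketch of how such values map onto java.util.concurrent.ThreadPoolExecutor; this is illustrative wiring, not HiveServer's actual pool construction:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class AsyncExecPoolSketch {
    static ThreadPoolExecutor build(int threads, int queueSize, long keepAliveSecs) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            threads, threads,                       // hive.server.async.exec.threads
            keepAliveSecs, TimeUnit.SECONDS,        // hive.server.async.exec.keepalive.time
            new LinkedBlockingQueue<>(queueSize));  // hive.server.async.exec.wait.queue.size
        pool.allowCoreThreadTimeOut(true);          // let idle workers die after the keepalive
        return pool;                                // submissions beyond the queue are rejected
    }
}
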
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ASYNC_EXEC_ASYNC_COMPILE", 916, "hive.server.async.exec.async.compile", v, "Whether to enable compiling the query asynchronously. If enabled, it is unknown whether the query will have any resultset before compilation has completed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ASYNC_EXEC_ASYNC_COMPILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_LONG_POLLING_TIMEOUT", 917, "hive.server.long.polling.timeout", "5000ms", v, "Time that HiveServer will wait before responding to asynchronous calls that use long polling");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LONG_POLLING_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SESSION_IMPL_CLASSNAME", 918, "hive.session.impl.classname", null, "Classname for custom implementation of hive session");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_IMPL_CLASSNAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME", 919, "hive.session.impl.withugi.classname", null, "Classname for custom implementation of hive session with UGI");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[8];
v[0] = "NOSASL";
v[1] = "NONE";
v[2] = "LDAP";
v[3] = "KERBEROS";
v[4] = "PAM";
v[5] = "CUSTOM";
v[6] = "SAML";
v[7] = "JWT";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_AUTHENTICATION", 920, "hive.server.authentication", "NONE", v, "Client authentication types.\n  NONE: no authentication check\n  LDAP: LDAP/AD based authentication\n  KERBEROS: Kerberos/GSSAPI authentication\n  CUSTOM: Custom authentication provider\n          (Use with property hive.server.custom.authentication.class)\n  PAM: Pluggable authentication module\n  NOSASL:  Raw transport\n  SAML: SAML 2.0 compliant authentication. This is only supported in http transport mode.\n  JWT: JWT based authentication. HS2 expects JWT contains the user name as subject and was signed by an\n       asymmetric key. This is only supported in http transport mode.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION> = v;
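
The Validator$StringSet built above constrains hive.server.authentication to one of the eight listed tokens. A minimal sketch of that kind of membership check (illustrative, not the StringSet source):

import java.util.Set;

final class AuthModeCheckSketch {
    private static final Set<String> ALLOWED =
        Set.of("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM", "SAML", "JWT");

    static String validate(String value) {
        if (value == null || !ALLOWED.contains(value.toUpperCase())) {
            throw new IllegalArgumentException("Invalid authentication mode: " + value);
        }
        return value;
    }
}
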
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TRUSTED_DOMAIN", 921, "hive.server.trusted.domain", "", "Specifies the host or a domain to trust connections from. Authentication is skipped for any connection coming from a host whose hostname ends with the value of this property. If authentication is expected to be skipped for connections from only a given host, fully qualified hostname of that host should be specified. By default it is empty, which means that all the connections to HiveServer are authenticated. When it is non-empty, the client has to provide a Hive user name. Any password, if provided, will not be used when authentication is skipped.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_DOMAIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_TRUSTED_DOMAIN_USE_XFF_HEADER", 922, "hive.server.trusted.domain.use.xff.header", v, "When trusted domain authentication is enabled, the clients connecting to the HS2 could pass through many layers of proxies. Some proxies append their own ip address to the \'X-Forwarded-For\' header before passing on the request to another proxy or HS2. Some proxies also connect on behalf of the client and may create a separate connection to HS2 without binding using the client IP. For such environments, instead of looking at the client IP from the request, if this config is set and if \'X-Forwarded-For\' is present, trusted domain authentication will use the left-most ip address from the X-Forwarded-For header.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TRUSTED_DOMAIN_USE_XFF_HEADER> = v;
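
As the description says, with hive.server.trusted.domain.use.xff.header enabled the trust check uses the left-most address in X-Forwarded-For, since each proxy appends itself to the right. A sketch of that extraction (the helper and its names are illustrative):

final class XffSketch {
    // "client, proxy1, proxy2" -> "client"; fall back to the socket address if no header.
    static String clientAddress(String xffHeader, String socketAddress) {
        if (xffHeader == null || xffHeader.isEmpty()) {
            return socketAddress;
        }
        return xffHeader.split(",")[0].trim();
    }
}
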
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ALLOW_USER_SUBSTITUTION", 923, "hive.server.allow.user.substitution", v, "Allow alternate user to be specified as part of HiveServer open connection request.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ALLOW_USER_SUBSTITUTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_KERBEROS_KEYTAB", 924, "hive.server.authentication.kerberos.keytab", "", "Kerberos keytab file for server principal");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_KERBEROS_KEYTAB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_KERBEROS_PRINCIPAL", 925, "hive.server.authentication.kerberos.principal", "", "Kerberos server principal");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_KERBEROS_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_CLIENT_KERBEROS_PRINCIPAL", 926, "hive.server.authentication.client.kerberos.principal", "", "Kerberos principal used by the HA hive_server2s.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLIENT_KERBEROS_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SPNEGO_KEYTAB", 927, "hive.server.authentication.spnego.keytab", "", "keytab file for the SPNego principal, optional,\na typical value would look like /etc/security/keytabs/spnego.service.keytab.\nThis keytab would be used by HiveServer when Kerberos security is enabled and\nHTTP transport mode is used.\nThis needs to be set only if SPNEGO is to be used in authentication.\nSPNego authentication would be honored only if valid\n  hive.server.authentication.spnego.principal\nand\n  hive.server.authentication.spnego.keytab\nare specified.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SPNEGO_KEYTAB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SPNEGO_PRINCIPAL", 928, "hive.server.authentication.spnego.principal", "", "SPNego service principal, optional,\ntypical value would look like HTTP/_HOST@EXAMPLE.COM\nSPNego service principal would be used by HiveServer when Kerberos security is enabled\nand HTTP transport mode is used.\nThis needs to be set only if SPNEGO is to be used in authentication.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SPNEGO_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_URL", 929, "hive.server.authentication.ldap.url", null, "LDAP connection URL(s),\nthis value could contain URLs to multiple LDAP server instances for HA,\neach LDAP URL is separated by a SPACE character. URLs are used in the\norder specified until a connection is successful.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_URL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_BASEDN", 930, "hive.server.authentication.ldap.baseDN", null, "LDAP base DN");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BASEDN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_DOMAIN", 931, "hive.server.authentication.ldap.Domain", null, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_DOMAIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN", 932, "hive.server.authentication.ldap.groupDNPattern", null, "COLON-separated list of patterns to use to find DNs for group entities in this directory.\nUse %s where the actual group name is to be substituted.\nFor example: CN=%s,CN=Groups,DC=subdomain,DC=domain,DC=com.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER", 933, "hive.server.authentication.ldap.groupFilter", null, "COMMA-separated list of LDAP Group names (short name not full DNs).\nFor example: HiveAdmins,HadoopAdmins,Administrators");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN", 934, "hive.server.authentication.ldap.userDNPattern", null, "COLON-separated list of patterns to use to find DNs for users in this directory.\nUse %s where the actual user name is to be substituted.\nFor example: CN=%s,CN=Users,DC=subdomain,DC=domain,DC=com.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN> = v;
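
Both groupDNPattern and userDNPattern are COLON-separated templates with %s as the name placeholder. A sketch of how such a pattern list expands into candidate DNs for one name (illustrative helper, not the LDAP Atn provider's code):

final class DnPatternSketch {
    // "CN=%s,CN=Users,DC=d,DC=com:CN=%s,OU=eng,DC=d,DC=com" + "alice"
    //   -> ["CN=alice,CN=Users,DC=d,DC=com", "CN=alice,OU=eng,DC=d,DC=com"]
    static String[] candidateDns(String colonSeparatedPatterns, String name) {
        String[] patterns = colonSeparatedPatterns.split(":");
        String[] dns = new String[patterns.length];
        for (int i = 0; i < patterns.length; i++) {
            dns[i] = patterns[i].replace("%s", name);
        }
        return dns;
    }
}
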
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_USERFILTER", 935, "hive.server.authentication.ldap.userFilter", null, "COMMA-separated list of LDAP usernames (just short names, not full DNs).\nFor example: hiveuser,impalauser,hiveadmin,hadoopadmin");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERFILTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_GUIDKEY", 936, "hive.server.authentication.ldap.guidKey", "uid", "LDAP attribute name whose values are unique in this LDAP server.\nFor example: uid or CN.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GUIDKEY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY", 937, "hive.server.authentication.ldap.groupMembershipKey", "member", "LDAP attribute name on the group object that contains the list of distinguished names\nfor the user, group, and contact objects that are members of the group.\nFor example: member, uniqueMember or memberUid");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY", 938, "hive.server.authentication.ldap.userMembershipKey", null, "LDAP attribute name on the user object that contains groups of which the user is\na direct member, except for the primary group, which is represented by the\nprimaryGroupId.\nFor example: memberOf");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY", 939, "hive.server.authentication.ldap.groupClassKey", "groupOfNames", "LDAP attribute name on the group entry that is to be used in LDAP group searches.\nFor example: group, groupOfNames or groupOfUniqueNames.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY", 940, "hive.server.authentication.ldap.customLDAPQuery", null, "A full LDAP query that the LDAP Atn provider executes against the LDAP Server.\nIf this query returns a null resultset, the LDAP Provider fails the Authentication\nrequest; it succeeds if the user is part of the resultset. For example: (&(objectClass=group)(objectClass=top)(instanceType=4)(cn=Domain*)) \n(&(objectClass=person)(|(sAMAccountName=admin)(|(memberOf=CN=Domain Admins,CN=Users,DC=domain,DC=com)(memberOf=CN=Administrators,CN=Builtin,DC=domain,DC=com))))");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_BIND_USER", 941, "hive.server.authentication.ldap.binddn", null, "The user with which to bind to the LDAP server, and search for the full domain name of the user being authenticated.\nThis should be the full domain name of the user, and should have search access across all users in the LDAP tree.\nIf not specified, then the user being authenticated will be used as the bind user.\nFor example: CN=bindUser,CN=Users,DC=subdomain,DC=domain,DC=com");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BIND_USER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PLAIN_LDAP_BIND_PASSWORD", 942, "hive.server.authentication.ldap.bindpw", null, "The password for the bind user, to be used to search for the full name of the user being authenticated.\nIf the username is specified, this parameter must also be specified.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PLAIN_LDAP_BIND_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS", 943, "hive.server.custom.authentication.class", null, "Custom authentication class. Used when property\n\'hive.server.authentication\' is set to \'CUSTOM\'. Provided class\nmust be a proper implementation of the interface\norg.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2\nwill call its Authenticate(user, password) method to authenticate requests.\nThe implementation may optionally implement Hadoop\'s\norg.apache.hadoop.conf.Configurable class to grab Hive\'s Configuration object.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS> = v;
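
The description above names the CUSTOM-mode contract: a class implementing org.apache.hive.service.auth.PasswdAuthenticationProvider whose Authenticate(user, password) either returns or throws. A minimal hedged implementation; the hard-coded credentials are purely illustrative, and a real provider would consult an external store:

import javax.security.sasl.AuthenticationException;
import org.apache.hive.service.auth.PasswdAuthenticationProvider;

public class StaticPasswdAuthProvider implements PasswdAuthenticationProvider {
    @Override
    public void Authenticate(String user, String password) throws AuthenticationException {
        // Illustrative check only: accept a single fixed user/password pair.
        if (!"hive".equals(user) || !"secret".equals(password)) {
            throw new AuthenticationException("Authentication failed for " + user);
        }
    }
}
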
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_PAM_SERVICES", 944, "hive.server.authentication.pam.services", null, "List of the underlying pam services that should be used when auth type is PAM\nA file with the same name must exist in /etc/pam.d");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_PAM_SERVICES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_URL", 945, "hive.server.authentication.jwt.jwks.url", "", "URL of the file from where URLBasedJWKSProvider will try to load JWKS if JWT is enabled for the\nauthentication mode.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_URL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_SKIP_SSL_CERT", 946, "hive.server.authentication.jwt.jwks.skip.ssl.cert", v, "When this is enabled, the SSL certificate verification will be skipped.\nThis is meant to be used in a testing environment only. Do not use in production.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_SKIP_SSL_CERT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_KEYSTORE_PATH", 947, "hive.server.saml.keystore.path", "", "Keystore path to the saml client. This keystore is used to store the\n key pair used to sign the authentication requests when hive.server.saml.sign.requests\n is set to true. If the path doesn\'t exist, HiveServer will attempt to\n create a keystore using the default configurations; otherwise it will use\n the one provided.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_KEYSTORE_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_KEYSTORE_PASSWORD", 948, "hive.server.saml.keystore.password", "", "Password to the keystore used to sign the authentication requests. By default,\n this must be set to a non-blank value if the authentication mode is SAML.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_KEYSTORE_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_PRIVATE_KEY_PASSWORD", 949, "hive.server.saml.private.key.password", "", "Password for the private key which is stored in the keystore pointed \n by hive.server.saml.keystore.path. This key is used to sign the authentication request\n if hive.server.saml.sign.requests is set to true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_PRIVATE_KEY_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_IDP_METADATA", 950, "hive.server.saml.idp.metadata", "", "IDP metadata file for the SAML configuration. This metadata file must be\n exported from the external identity provider. This is used to validate the SAML assertions\n received by HiveServer.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_IDP_METADATA> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_SP_ID", 951, "hive.server.saml.sp.entity.id", "", "Service provider entity id for this HiveServer. This must match with the\n SP id on the external identity provider. If this is not set, HiveServer will use the\n callback url as the SP id.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_SP_ID> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_FORCE_AUTH", 952, "hive.server.saml.sp.force.auth", "false", "This is a boolean configuration which toggles the force authentication\n flag in the SAML authentication request. When set to true, the request generated\n to the IDP will ask the IDP to force the authentication again.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_FORCE_AUTH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_AUTHENTICATION_LIFETIME", 953, "hive.server.saml.max.authentication.lifetime", "1h", "This configuration can be used to set the lifetime of the\n authentication response from IDP. Generally the IDP will not ask\n you to enter credentials if you have an authenticated session with it already.\n The IDP will automatically generate an assertion in such a case. This configuration\n can be used to set the time limit for such assertions. Assertions which are\n older than this value will not be accepted by HiveServer. The default\n is one hour.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_AUTHENTICATION_LIFETIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_BLACKLISTED_SIGNATURE_ALGORITHMS", 954, "hive.server.saml.blacklisted.signature.algorithms", "", "Comma separated list of signature algorithm names which are not\n allowed by HiveServer during validation of the assertions received from IDP");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_BLACKLISTED_SIGNATURE_ALGORITHMS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_ACS_INDEX", 955, "hive.server.saml.acs.index", "", "This configuration specifies the assertion consumer service (ACS)\n index to be sent to the IDP in case it supports multiple ACS URLs. This\n will also be used to pick the ACS URL from the IDP metadata for validation.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_ACS_INDEX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_CALLBACK_URL", 956, "hive.server.saml.sp.callback.url", "", "Callback URL where SAML responses should be posted. Currently this\n must be configured at the same port number as defined by hive.server.thrift.http.port.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_CALLBACK_URL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_WANT_ASSERTIONS_SIGNED", 957, "hive.server.saml.want.assertions.signed", v, "When this configuration is set to true, HiveServer will validate the signature\n of the assertions received at the callback url. For security reasons, it is recommended that this value be true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_WANT_ASSERTIONS_SIGNED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_SIGN_REQUESTS", 958, "hive.server.saml.sign.requests", v, "When this configuration is set to true, HiveServer will sign the SAML requests\n which can be validated by the IDP provider.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_SIGN_REQUESTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_SAML_CALLBACK_TOKEN_TTL", 959, "hive.server.saml.callback.token.ttl", "30s", v, "Time for which the token issued by\nservice provider is valid.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_CALLBACK_TOKEN_TTL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_GROUP_ATTRIBUTE_NAME", 960, "hive.server.saml.group.attribute.name", "", "The attribute name in the SAML assertion which would\n be used to compare for the group name matching. By default it is empty\n which would allow any authenticated user. If this value is set then\n hive.server.saml.group.filter must be set to a non-empty value.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_GROUP_ATTRIBUTE_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SAML_GROUP_FILTER", 961, "hive.server.saml.group.filter", "", "Comma separated list of group names which will be allowed when SAML\n authentication is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SAML_GROUP_FILTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ENABLE_DOAS", 962, "hive.server.enable.doAs", v, "Setting this property to true will have HiveServer execute\nHive operations as the user making the calls to it.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ENABLE_DOAS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SERVICE_USERS", 963, "hive.server.service.users", null, "Comma separated list of users to have HiveServer skip authorization when compiling queries.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SERVICE_USERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DISTCP_DOAS_USER", 964, "hive.distcp.privileged.doAs", "hive", "This property allows privileged distcp executions done by hive\nto run as this user.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DISTCP_DOAS_USER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "CLASSIC";
v[1] = "HIVE";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_TABLE_TYPE_MAPPING", 965, "hive.server.table.type.mapping", "CLASSIC", v, "This setting reflects how HiveServer will report the table types for JDBC and other\nclient implementations that retrieve the available tables and supported table types\n  HIVE : Exposes Hive\'s native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW\n  CLASSIC : More generic types like TABLE and VIEW");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_TABLE_TYPE_MAPPING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SESSION_HOOK", 966, "hive.server.session.hook", "", "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SESSION_HOOK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_USE_SSL", 967, "hive.server.use.SSL", v, "Set this to true for using SSL encryption in HiveServer.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_USE_SSL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SSL_KEYSTORE_PATH", 968, "hive.server.keystore.path", "", "SSL certificate keystore location.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SSL_KEYSTORE_PASSWORD", 969, "hive.server.keystore.password", "", "SSL certificate keystore password.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SSL_KEYSTORE_TYPE", 970, "hive.server.keystore.type", "", "SSL certificate keystore type.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_TYPE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SSL_KEYMANAGERFACTORY_ALGORITHM", 971, "hive.server.keymanagerfactory.algorithm", "", "SSL certificate keystore algorithm.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYMANAGERFACTORY_ALGORITHM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SSL_HTTP_EXCLUDE_CIPHERSUITES", 972, "hive.server.http.exclude.ciphersuites", "", "A comma-separated list of SSL cipher suite names or regular expressions to exclude for the HiveServer http server.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_HTTP_EXCLUDE_CIPHERSUITES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_SSL_BINARY_INCLUDE_CIPHERSUITES", 973, "hive.server.binary.include.ciphersuites", "", "A colon-separated list of SSL cipher suite names to include for the HiveServer binary CLI server.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_BINARY_INCLUDE_CIPHERSUITES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_BUILTIN_UDF_WHITELIST", 974, "hive.server.builtin.udf.whitelist", "", "Comma separated list of builtin udf names allowed in queries.\nAn empty whitelist allows all builtin udfs to be executed. The udf blacklist takes precedence over the udf whitelist.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_BUILTIN_UDF_WHITELIST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_BUILTIN_UDF_BLACKLIST", 975, "hive.server.builtin.udf.blacklist", "", "Comma separated list of udf names. These udfs will not be allowed in queries. The udf blacklist takes precedence over the udf whitelist.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_BUILTIN_UDF_BLACKLIST> = v;
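
Per the two descriptions above, the blacklist wins over the whitelist and an empty whitelist admits every builtin udf. A sketch of that precedence rule (names illustrative):

import java.util.HashSet;
import java.util.Set;

final class UdfPolicySketch {
    static boolean allowed(String udf, String whitelistCsv, String blacklistCsv) {
        if (toSet(blacklistCsv).contains(udf)) {
            return false;                                   // blacklist takes precedence
        }
        Set<String> white = toSet(whitelistCsv);
        return white.isEmpty() || white.contains(udf);      // empty whitelist allows all
    }

    private static Set<String> toSet(String csv) {
        Set<String> out = new HashSet<>();
        if (csv != null && !csv.trim().isEmpty()) {
            for (String token : csv.split(",")) {
                out.add(token.trim());
            }
        }
        return out;
    }
}
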
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ALLOW_UDF_LOAD_ON_DEMAND", 976, "hive.allow.udf.load.on.demand", v, "Whether to enable loading UDFs from the metastore on demand; this is mostly relevant for\nHS2 and was the default behavior before Hive 1.2. Off by default.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ALLOW_UDF_LOAD_ON_DEMAND> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(3000L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit,java.lang.Long,boolean,java.lang.Long,boolean)>(v, v, 1, null, 0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_SESSION_CHECK_INTERVAL", 977, "hive.server.session.check.interval", "15m", v, "The check interval for session/operation timeout, which can be disabled by setting to zero or negative value.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SESSION_CHECK_INTERVAL> = v;
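
Defaults such as "15m" pair a number with a unit suffix, and the TimeValidator built above additionally enforces an inclusive lower bound of 3000ms. A sketch of the suffix parsing under those assumptions; the suffix table here is illustrative, not HiveConf's full time grammar:

import java.util.concurrent.TimeUnit;

final class TimeParseSketch {
    static long toMillis(String value) {
        String v = value.trim().toLowerCase();
        if (v.endsWith("ms")) return Long.parseLong(v.substring(0, v.length() - 2));
        if (v.endsWith("s"))  return TimeUnit.SECONDS.toMillis(stripUnit(v));
        if (v.endsWith("m"))  return TimeUnit.MINUTES.toMillis(stripUnit(v));  // "15m" -> 900000
        if (v.endsWith("h"))  return TimeUnit.HOURS.toMillis(stripUnit(v));
        return Long.parseLong(v);  // bare number: caller applies the variable's default unit
    }

    private static long stripUnit(String v) {
        return Long.parseLong(v.substring(0, v.length() - 1));
    }
}
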
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT", 978, "hive.server.close.session.on.disconnect", v, "Session will be closed when connection is closed. Set this to false to have session outlive its parent connection.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_IDLE_SESSION_TIMEOUT", 979, "hive.server.idle.session.timeout", "4h", v, "Session will be closed when it\'s not accessed for this duration, which can be disabled by setting to zero or negative value.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_SESSION_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_IDLE_OPERATION_TIMEOUT", 980, "hive.server.idle.operation.timeout", "2h", v, "Operation will be closed when it\'s not accessed for this duration of time, which can be disabled by setting to zero value.\n  With positive value, it\'s checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR).\n  With negative value, it\'s checked for all of the operations regardless of state.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_OPERATION_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION", 981, "hive.server.idle.session.check.operation", v, "Session will be considered to be idle only if there is no activity, and there is no pending operation.\n This setting takes effect only if session idle timeout (hive.server.idle.session.timeout) and checking\n(hive.server.session.check.interval) are enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT", 982, "hive.server.thrift.client.retry.limit", v, "Number of retries upon failure of Thrift HiveServer calls");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT", 983, "hive.server.thrift.client.connect.retry.limit", v, "Number of retries while opening a connection to HiveServer");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS", 984, "hive.server.thrift.client.retry.delay.seconds", "1s", v, "Number of seconds for the HiveServer thrift client to wait between consecutive connection attempts. Also specifies the time to wait between retrying thrift calls upon failures");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS> = v;
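
The retry limits (hive.server.thrift.client.retry.limit, .connect.retry.limit) and the retry delay above combine into a bounded retry loop on the client side. A hedged sketch where the Callable stands in for an actual Thrift call:

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

final class ThriftRetrySketch {
    static <T> T withRetries(Callable<T> rpc, int retryLimit, long delaySeconds) throws Exception {
        Exception last = null;
        for (int attempt = 0; attempt <= retryLimit; attempt++) {
            try {
                return rpc.call();                       // one initial attempt plus retryLimit retries
            } catch (Exception e) {
                last = e;
                TimeUnit.SECONDS.sleep(delaySeconds);    // hive.server.thrift.client.retry.delay.seconds
            }
        }
        throw last;
    }
}
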
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_CLIENT_USER", 985, "hive.server.thrift.client.user", "anonymous", "Username to use against thrift client");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_USER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_CLIENT_PASSWORD", 986, "hive.server.thrift.client.password", "anonymous", "Password to use against thrift client");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_CLIENT_PASSWORD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS", 987, "hive.server.thrift.resultset.serialize.in.tasks", v, "Whether we should serialize the Thrift structures used in JDBC ResultSet RPC in task nodes.\n We use SequenceFile and ThriftJDBCBinarySerDe to read and write the final results if this is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE", 988, "hive.server.thrift.resultset.max.fetch.size", v, "Max number of rows sent in one Fetch RPC call by the server to the client.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE", 989, "hive.server.thrift.resultset.default.fetch.size", v, "The number of rows sent in one Fetch RPC call by the server to the client, if not\nspecified by the client.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_XSRF_FILTER_ENABLED", 990, "hive.server.xsrf.filter.enabled", v, "If enabled, HiveServer will block any requests made to it over http if an X-XSRF-HEADER header is not present");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_XSRF_FILTER_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SECURITY_COMMAND_WHITELIST", 991, "hive.security.command.whitelist", "set,reset,dfs,add,list,delete,reload,compile,llap", "Comma separated list of non-SQL Hive commands users are authorized to execute");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_COMMAND_WHITELIST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH", 992, "hive.server.job.credential.provider.path", "", "If set, this configuration property should provide a comma-separated list of URLs that indicates the type and location of providers to be used by hadoop credential provider API. It provides HiveServer the ability to provide job-specific credential providers for jobs run using Tez, MR execution engines.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SERVER2_GRACEFUL_STOP_TIMEOUT", 993, "hive.server.graceful.stop.timeout", "1800s", v, "Maximum time to wait for live queries to finish before stopping HiveServer. With a value not greater than 30s (the overhead of stopping HiveServer), it will not wait for live queries to finish, but will instead call stop directly to shut down HiveServer gracefully.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_GRACEFUL_STOP_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(15);
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1024L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>(java.lang.Long,boolean,java.lang.Long,boolean)>(v, 1, v, 1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_MOVE_FILES_THREAD_COUNT", 994, "hive.mv.files.thread", v, v, "Number of threads used to move files in move task. Set it to 0 to disable multi-threaded file moves. This parameter is also used by MSCK to check tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MOVE_FILES_THREAD_COUNT> = v;
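
hive.mv.files.thread sizes the pool used by the move task, with 0 falling back to single-threaded moves. A sketch of that dispatch decision (the move loop is illustrative, not the MoveTask implementation):

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class MoveFilesSketch {
    static void moveAll(List<Path> sources, Path destDir, int threads) throws Exception {
        if (threads <= 0) {                                 // 0 disables multi-threaded moves
            for (Path src : sources) {
                Files.move(src, destDir.resolve(src.getFileName()));
            }
            return;
        }
        ExecutorService pool = Executors.newFixedThreadPool(threads);
        for (Path src : sources) {
            pool.submit(() -> Files.move(src, destDir.resolve(src.getFileName())));
        }
        pool.shutdown();
    }
}
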
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(15);
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1024L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>(java.lang.Long,boolean,java.lang.Long,boolean)>(v, 1, v, 1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT", 995, "hive.load.dynamic.partitions.thread", v, v, "Number of threads used to load dynamic partitions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS", 996, "hive.load.dynamic.partitions.scan.specific.partitions", v, "For dynamically partitioned tables, scan only the specific partitions named in the list.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES", 997, "hive.multi.insert.move.tasks.share.dependencies", v, "If this is set all move tasks for tables/partitions (not directories) at the end of a\nmulti-insert query will only begin once the dependencies for all these move tasks have been\nmet.\nAdvantages: If concurrency is enabled, the locks will only be released once the query has\n            finished, so with this config enabled, the time when the table/partition is\n            generated will be much closer to when the lock on it is released.\nDisadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which\n               are produced by this query and finish earlier will be available for querying\n               much earlier.  Since the locks are only released once the query finishes, this\n               does not apply if concurrency is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_HDFS_ENCRYPTION_SHIM_CACHE_ON", 998, "hive.hdfs.encryption.shim.cache.on", v, "Hive keeps a cache of hdfs encryption shims in SessionState. Each encryption shim in the cache stores a FileSystem object. If one of these FileSystems is closed anywhere in the system and the HDFS config fs.hdfs.impl.disable.cache is false, its encryption shim in the cache will be unusable. If this config is set to false, then the encryption shim cache will be disabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HDFS_ENCRYPTION_SHIM_CACHE_ON> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_INFER_BUCKET_SORT", 999, "hive.exec.infer.bucket.sort", v, "If this is set, when writing partitions, the metadata will include the bucketing/sorting\nproperties with which the data was written if any (this will not overwrite the metadata\ninherited from the table if the table is bucketed/sorted)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INFER_BUCKET_SORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO", 1000, "hive.exec.infer.bucket.sort.num.buckets.power.two", v, "If this is set, when setting the number of reducers for the map reduce task which writes the\nfinal output files, it will choose a number which is a power of two, unless the user specifies\nthe number of reducers to use using mapred.reduce.tasks.  The number of reducers\nmay be set to a power of two, only to be followed by a merge task, preventing\nanything from being inferred.\nWith hive.exec.infer.bucket.sort set to true:\nAdvantages:  If this is not set, the number of buckets for partitions will seem arbitrary,\n             which means that the number of mappers used for optimized joins, for example, will\n             be very low.  With this set, since the number of buckets used for any partition is\n             a power of two, the number of mappers used for optimized joins will be the least\n             number of buckets used by any partition being joined.\nDisadvantages: This may mean a much larger or much smaller number of reducers being used in the\n               final map reduce job, e.g. if a job was originally going to take 257 reducers,\n               it will now take 512 reducers, similarly if the max number of reducers is 511,\n               and a job was going to use this many, it will now use 256 reducers.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEOPTLISTBUCKETING", 1001, "hive.optimize.listbucketing", v, "Enable the list bucketing optimizer. Default value is false, so it is disabled by default.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEOPTLISTBUCKETING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("SERVER_READ_SOCKET_TIMEOUT", 1002, "hive.server.read.socket.timeout", "10s", v, "Timeout for the HiveServer to close the connection if no response from the client. By default, 10 seconds.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SERVER_READ_SOCKET_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("SERVER_TCP_KEEP_ALIVE", 1003, "hive.server.tcp.keepalive", v, "Whether to enable TCP keepalive for the Hive Server. Keepalive will prevent accumulation of half-open connections.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars SERVER_TCP_KEEP_ALIVE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DECODE_PARTITION_NAME", 1004, "hive.decode.partition.name", v, "Whether to show the unquoted partition names in query results.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DECODE_PARTITION_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "mr";
v[1] = "tez";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(boolean,java.lang.String[])>(1, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_EXECUTION_ENGINE", 1005, "hive.execution.engine", "mr", v, "Chooses execution engine. Options are: mr (Map reduce, default), tez. While MR\nremains the default engine for historical reasons, it is itself a historical engine\nand is deprecated in Hive 2 line. It may be removed without further warning.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXECUTION_ENGINE> = v;
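/* Illustrative sketch, not part of the decompiled output: the StringSet validator
   constructed above restricts hive.execution.engine to "mr" or "tez". Assuming the
   standard HiveConf API (verifyAndSet is the accessor that consults registered
   validators), switching a session to Tez could look roughly like:

     HiveConf conf = new HiveConf();
     conf.setVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "tez");
     // verifyAndSet runs the validator; values outside {mr, tez} are rejected
     conf.verifyAndSet(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, "tez");
*/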
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "container";
v[1] = "llap";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_EXECUTION_MODE", 1006, "hive.execution.mode", "container", v, "Chooses whether query fragments will run in container or in llap");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXECUTION_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_JAR_DIRECTORY", 1007, "hive.jar.directory", null, "This is the location hive in tez mode will look in to find a site-wide \ninstalled hive instance.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_JAR_DIRECTORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_USER_INSTALL_DIR", 1008, "hive.user.install.directory", "/user/", "If hive (in tez mode only) cannot find a usable hive jar in \"hive.jar.directory\", \nit will upload the hive jar to \"hive.user.install.directory/user.name\"\nand use it to run queries.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_USER_INSTALL_DIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MASKING_ALGO", 1009, "hive.masking.algo", "sha256", "This property is used to indicate whether FIPS mode is enabled or not. Value should be sha512 to indicate that FIPS mode is enabled; else the value should be sha256, which is the default. Column masking is done using this value.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MASKING_ALGO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_ENABLED", 1010, "hive.vectorized.execution.enabled", v, "This flag should be set to true to enable vectorized mode of query execution.\nThe default value is true to reflect that our most expected Hive deployment will be using vectorization.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ENABLED> = v;
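/* Illustrative sketch, not part of the decompiled output: boolean-typed ConfVars such as
   hive.vectorized.execution.enabled are toggled through the boolean accessors, e.g.:

     HiveConf conf = new HiveConf();
     if (conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED)) {
       // fall back to row-mode execution for a problematic query
       conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
     }
*/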
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_REDUCE_ENABLED", 1011, "hive.vectorized.execution.reduce.enabled", v, "This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED", 1012, "hive.vectorized.execution.reduce.groupby.enabled", v, "This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED", 1013, "hive.vectorized.execution.mapjoin.native.enabled", v, "This flag should be set to true to enable native (i.e. non-pass through) vectorization\nof queries using MapJoin.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED", 1014, "hive.vectorized.execution.mapjoin.native.multikey.only.enabled", v, "This flag should be set to true to restrict use of native vector map join hash tables to\nthe MultiKey in queries using MapJoin.\nThe default value is false.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED", 1015, "hive.vectorized.execution.mapjoin.minmax.enabled", v, "This flag should be set to true to enable vector map join hash tables to\nuse min / max filtering for integer join queries using MapJoin.\nThe default value is false.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD", 1016, "hive.vectorized.execution.mapjoin.overflow.repeated.threshold", v, "The number of small table rows for a match in vector map join hash tables\nwhere we use the repeated field optimization in overflow vectorized row batch for join queries using MapJoin.\nA value of -1 means always use the join result optimization.  Otherwise, the threshold value can be 0 to the maximum integer.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED", 1017, "hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled", v, "This flag should be set to true to enable use of native fast vector map join hash tables in\nqueries using MapJoin.\nThe default value is false.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL", 1018, "hive.vectorized.groupby.checkinterval", v, "Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1000000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_GROUPBY_MAXENTRIES", 1019, "hive.vectorized.groupby.maxentries", v, "Max number of entries in the vector group by aggregation hashtables. \nExceeding this will trigger a flush regardless of memory pressure.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_MAXENTRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.1F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT", 1020, "hive.vectorized.groupby.flush.percent", v, "Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT> = v;
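/* Illustrative sketch, not part of the decompiled output: the three group-by knobs above
   interact: every checkinterval insertions the average entry size is re-estimated,
   maxentries caps the hash table, and flush.percent decides how much is evicted on a
   flush. A hypothetical tuning for a memory-constrained deployment (values are made up):

     HiveConf conf = new HiveConf();
     conf.setIntVar(HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL, 50000);
     conf.setIntVar(HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_MAXENTRIES, 500000);
     conf.setFloatVar(HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT, 0.2f);
*/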
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED", 1021, "hive.vectorized.execution.reducesink.new.enabled", v, "This flag should be set to true to enable the new vectorization\nof queries using ReduceSink.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT", 1022, "hive.vectorized.use.vectorized.input.format", v, "This flag should be set to true to enable vectorizing with vectorized input file format capable SerDe.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_VECTORIZED_INPUT_FILE_FORMAT_EXCLUDES", 1023, "hive.vectorized.input.format.excludes", "", "This configuration should be set to fully qualified input format class names for which \n vectorized input format should not be used for vectorized execution.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_VECTORIZED_INPUT_FILE_FORMAT_EXCLUDES> = v;
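/* Illustrative sketch, not part of the decompiled output: the excludes list takes fully
   qualified class names, comma separated. Opting one format out of vectorized reads could
   look roughly like:

     HiveConf conf = new HiveConf();
     conf.setVar(HiveConf.ConfVars.HIVE_VECTORIZATION_VECTORIZED_INPUT_FILE_FORMAT_EXCLUDES,
         "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat");
*/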
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE", 1024, "hive.vectorized.use.vector.serde.deserialize", v, "This flag should be set to true to enable vectorizing rows using vector deserialize.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_USE_ROW_DESERIALIZE", 1025, "hive.vectorized.use.row.serde.deserialize", v, "This flag should be set to true to enable vectorizing using row deserialize.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_ROW_DESERIALIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES", 1026, "hive.vectorized.row.serde.inputformat.excludes", "org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat", "The input formats not supported by row deserialize vectorization.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "chosen";
v[2] = "all";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_VECTOR_ADAPTOR_USAGE_MODE", 1027, "hive.vectorized.adaptor.usage.mode", "all", v, "Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a corresponding vectorized class.\n. none   : disable any usage of VectorUDFAdaptor\n. chosen : use VectorUDFAdaptor for a small set of UDFs that were chosen for good performance\n. all    : use VectorUDFAdaptor for all UDFs");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTOR_ADAPTOR_USAGE_MODE> = v;
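/* Illustrative sketch, not part of the decompiled output: the adaptor mode trades UDF
   coverage against speed. Forcing the conservative mode that only uses the
   VectorUDFAdaptor for the hand-picked UDF set:

     HiveConf conf = new HiveConf();
     conf.setVar(HiveConf.ConfVars.HIVE_VECTOR_ADAPTOR_USAGE_MODE, "chosen");
*/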
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE", 1028, "hive.test.vectorized.adaptor.override", v, "internal use only, used to force always using the VectorUDFAdaptor.\nThe default is false, of course", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_PTF_ENABLED", 1029, "hive.vectorized.execution.ptf.enabled", v, "This flag should be set to true to enable vectorized mode of the PTF of query execution.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_PTF_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(25);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_PTF_MAX_MEMORY_BUFFERING_BATCH_COUNT", 1030, "hive.vectorized.ptf.max.memory.buffering.batch.count", v, "Maximum number of vectorized row batches to buffer in memory for PTF.\nThe default value is 25.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_PTF_MAX_MEMORY_BUFFERING_BATCH_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_VECTORIZATION_TESTING_REDUCER_BATCH_SIZE", 1031, "hive.vectorized.testing.reducer.batch.size", v, "internal use only, used for creating small group key vectorized row batches to exercise more logic\nThe default value is -1 which means don\'t restrict for testing", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_TESTING_REDUCER_BATCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_VECTORIZATION_TESTING_REUSE_SCRATCH_COLUMNS", 1032, "hive.vectorized.reuse.scratch.columns", v, "internal use only. Disable this to debug scratch column state issues", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_TESTING_REUSE_SCRATCH_COLUMNS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_COMPLEX_TYPES_ENABLED", 1033, "hive.vectorized.complex.types.enabled", v, "This flag should be set to true to enable vectorization\nof expressions with complex types.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_COMPLEX_TYPES_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_GROUPBY_COMPLEX_TYPES_ENABLED", 1034, "hive.vectorized.groupby.complex.types.enabled", v, "This flag should be set to true to enable group by vectorization\nof aggregations that use complex types.\nFor example, AVG uses a complex type (STRUCT) for partial aggregation results.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_GROUPBY_COMPLEX_TYPES_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED", 1035, "hive.vectorized.row.identifier.enabled", v, "This flag should be set to true to enable vectorization of ROW__ID.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_USE_CHECKED_EXPRESSIONS", 1036, "hive.vectorized.use.checked.expressions", v, "This flag should be set to true to use overflow checked vector expressions when available.\nFor example, arithmetic expressions which can overflow the output data type can be evaluated using\n checked vector expressions so that they produce same result as non-vectorized evaluation.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_USE_CHECKED_EXPRESSIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZED_ADAPTOR_SUPPRESS_EVALUATE_EXCEPTIONS", 1037, "hive.vectorized.adaptor.suppress.evaluate.exceptions", v, "This flag should be set to true to suppress HiveException from the generic UDF function\nevaluate call and turn them into NULLs. Assume, by default, this is not needed");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_ADAPTOR_SUPPRESS_EVALUATE_EXCEPTIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED", 1038, "hive.vectorized.input.format.supports.enabled", "decimal_64", "Which vectorized input format support features are enabled for vectorization.\nThat is, if a VectorizedInputFormat input format does support \"decimal_64\", for example,\nthis variable must enable that feature for it to be used in vectorization");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "adaptor";
v[1] = "good";
v[2] = "better";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_VECTORIZED_IF_EXPR_MODE", 1039, "hive.vectorized.if.expr.mode", "better", v, "Specifies the extent to which SQL IF statements will be vectorized.\n. adaptor: only use the VectorUDFAdaptor to vectorize IF statements\n. good   : use regular vectorized IF expression classes that get good performance\n. better : use vectorized IF expression classes that conditionally execute THEN/ELSE\n            expressions for better performance.\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZED_IF_EXPR_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "enable";
v[2] = "disable";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,boolean)>("HIVE_TEST_VECTORIZATION_ENABLED_OVERRIDE", 1040, "hive.test.vectorized.execution.enabled.override", "none", v, "internal use only, used to override the hive.vectorized.execution.enabled setting and\nturn vectorization on or off.  The default is none, of course", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZATION_ENABLED_OVERRIDE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_TEST_VECTORIZATION_SUPPRESS_EXPLAIN_EXECUTION_MODE", 1041, "hive.test.vectorization.suppress.explain.execution.mode", v, "internal use only, used to suppress \"Execution mode: vectorized\" EXPLAIN display.\nThe default is false, of course", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZATION_SUPPRESS_EXPLAIN_EXECUTION_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,boolean)>("HIVE_TEST_VECTORIZER_SUPPRESS_FATAL_EXCEPTIONS", 1042, "hive.test.vectorizer.suppress.fatal.exceptions", v, "internal use only. When false, don\'t suppress fatal exceptions like\nNullPointerException, etc., so the query will fail and ensure it will be noticed", 1);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEST_VECTORIZER_SUPPRESS_FATAL_EXCEPTIONS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_VECTORIZATION_FILESINK_ARROW_NATIVE_ENABLED", 1043, "hive.vectorized.execution.filesink.arrow.native.enabled", v, "This flag should be set to true to enable the native vectorization\nof queries using the Arrow SerDe and FileSink.\nThe default value is false.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_VECTORIZATION_FILESINK_ARROW_NATIVE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TYPE_CHECK_ON_INSERT", 1044, "hive.typecheck.on.insert", v, "This property has been extended to control whether to check, convert, and normalize partition values to conform to their column types in partition operations, including but not limited to insert (e.g. alter, describe, etc.).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TYPE_CHECK_ON_INSERT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_HADOOP_CLASSPATH", 1045, "hive.hadoop.classpath", null, "For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting HiveServer \nusing \"-hiveconf hive.hadoop.classpath=%HIVE_LIB%\".");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HADOOP_CLASSPATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_RPC_QUERY_PLAN", 1046, "hive.rpc.query.plan", v, "Whether to send the query plan via local resource or RPC");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_RPC_QUERY_PLAN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PLAN_MAPWORK_SERIALIZATION_SKIP_PROPERTIES", 1047, "hive.plan.mapwork.serialization.skip.properties", "", "Comma separated list of properties which is not needed in execution time, so can be removed from PartitionDesc properties before serialization, config can contain exact strings and regex expressions, the regex mode is activated if at least 1 asterisk (*) is present in the current word:\nrawDataSize                exact string match, removes only rawDataSize property\n.*Size                     regex match, removes every property ending with \'Size\'\nnumRows,impala_.*chunk.*   comma separated and mixed (handles strings and regexes at the same time)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PLAN_MAPWORK_SERIALIZATION_SKIP_PROPERTIES> = v;
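/* Illustrative sketch, not part of the decompiled output: mixing exact names and regexes
   in the skip list, per the description above (a word containing an asterisk is treated
   as a regex):

     HiveConf conf = new HiveConf();
     // drops rawDataSize exactly, plus every property ending with "Size"
     conf.setVar(HiveConf.ConfVars.HIVE_PLAN_MAPWORK_SERIALIZATION_SKIP_PROPERTIES,
         "rawDataSize,.*Size");
*/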
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_AM_SPLIT_GENERATION", 1048, "hive.compute.splits.in.am", v, "Whether to generate the splits locally or in the AM (tez only)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_AM_SPLIT_GENERATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SPLITS_AVAILABLE_SLOTS_CALCULATOR_CLASS", 1049, "hive.splits.available.slots.calculator.class.name", "org.apache.hadoop.hive.ql.exec.tez.TezAvailableSlotsCalculator", "Class to use for calculating available slots during split generation");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SPLITS_AVAILABLE_SLOTS_CALCULATOR_CLASS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TEZ_GENERATE_CONSISTENT_SPLITS", 1050, "hive.tez.input.generate.consistent.splits", v, "Whether to generate consistent split locations when generating splits in the AM");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_GENERATE_CONSISTENT_SPLITS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PREWARM_ENABLED", 1051, "hive.prewarm.enabled", v, "Enables container prewarm for Tez (Hadoop 2 only)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PREWARM_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_PREWARM_NUM_CONTAINERS", 1052, "hive.prewarm.numcontainers", v, "Controls the number of containers to prewarm for Tez (Hadoop 2 only)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_PREWARM_NUM_CONTAINERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[4];
v[0] = "none";
v[1] = "idonly";
v[2] = "traverse";
v[3] = "execution";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVESTAGEIDREARRANGE", 1053, "hive.stageid.rearrange", "none", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVESTAGEIDREARRANGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES", 1054, "hive.explain.dependency.append.tasktype", v, "");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVEUSEGOOGLEREGEXENGINE", 1055, "hive.use.googleregex.engine", v, "Whether to use the Google regex engine or not; the default regex engine is java.util.regex");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVEUSEGOOGLEREGEXENGINE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVECOUNTERGROUP", 1056, "hive.counters.group.name", "HIVE", "The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVECOUNTERGROUP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "column";
v[2] = "standard";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_QUOTEDID_SUPPORT", 1057, "hive.support.quoted.identifiers", "column", v, "Whether to use quoted identifiers. \'none\', \'column\', and \'standard\' can be used. \n  none: Quotation of identifiers and special characters in identifiers are not allowed but regular expressions in backticks are supported for column names.\n  column: Use the backtick character to quote identifiers having special characters. `col1` Use single quotes to quote string literals. \'value\' Double quotes are also accepted but not recommended.\n  standard: SQL standard way to quote identifiers. Use double quotes to quote identifiers having special characters \"col1\" and single quotes for string literals. \'value\'");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUOTEDID_SUPPORT> = v;
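/* Illustrative sketch, not part of the decompiled output: what the two quoting modes
   accept, shown as query strings one might submit over JDBC (table and column names are
   hypothetical):

     // column (the default): backticks quote identifiers, single quotes quote strings
     String q1 = "SELECT `select` FROM t WHERE c = 'value'";
     // standard: double quotes for identifiers, single quotes for strings
     String q2 = "SELECT \"select\" FROM t WHERE c = 'value'";
*/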
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES", 1058, "hive.support.special.characters.tablename", v, "This flag should be set to true to enable support for special characters in table names.\nWhen it is set to false, only [a-zA-Z_0-9]+ are supported.\nThe supported special characters are %&\'()*+,-./:;<=>?[]_|{}$^!~#@ and space. This flag applies only to quoted table names.\nThe default value is true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("CREATE_TABLE_AS_EXTERNAL", 1059, "hive.create.as.external.legacy", v, "When this flag is set to true, it will ignore hive.create.as.acid and hive.create.as.insert.only, and create an external purge table by default.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CREATE_TABLE_AS_EXTERNAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("CREATE_TABLES_AS_ACID", 1060, "hive.create.as.acid", v, "Whether the eligible tables should be created as full ACID by default. Does \nnot apply to external tables, the ones using storage handlers, etc.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars CREATE_TABLES_AS_ACID> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CREATE_TABLES_AS_INSERT_ONLY", 1061, "hive.create.as.insert.only", v, "Whether the eligible tables should be created as ACID insert-only by default. Does \nnot apply to external tables, the ones using storage handlers, etc.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CREATE_TABLES_AS_INSERT_ONLY> = v;
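/* Illustrative sketch, not part of the decompiled output: the three create-as flags above
   pick the default table type for a plain CREATE TABLE. A hypothetical combination that
   makes eligible new tables insert-only ACID:

     HiveConf conf = new HiveConf();
     conf.setBoolVar(HiveConf.ConfVars.CREATE_TABLES_AS_ACID, false);
     conf.setBoolVar(HiveConf.ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY, true);
     // per its description, CREATE_TABLE_AS_EXTERNAL=true would override both
*/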
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ACID_DIRECT_INSERT_ENABLED", 1062, "hive.acid.direct.insert.enabled", v, "Enable writing the data files directly to the table\'s final destination instead of the staging directory. This optimization only applies to INSERT operations on ACID tables.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ACID_DIRECT_INSERT_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TXN_CTAS_X_LOCK", 1063, "hive.txn.xlock.ctas", v, "Enables exclusive locking for CTAS operations.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TXN_CTAS_X_LOCK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.String,boolean,java.lang.String)>("USERS_IN_ADMIN_ROLE", 1064, "hive.users.in.admin.role", "", 0, "Comma separated list of users who are in admin role for bootstrapping.\nMore users can be added in ADMIN role later.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars USERS_IN_ADMIN_ROLE> = v;
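/* Illustrative sketch, not part of the decompiled output: this is a plain comma-separated
   user list (the user names below are made up):

     HiveConf conf = new HiveConf();
     conf.setVar(HiveConf.ConfVars.USERS_IN_ADMIN_ROLE, "hive,ops_admin");
*/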
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = <org.apache.hive.common.HiveCompat: java.lang.String DEFAULT_COMPAT_LEVEL>;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_COMPAT", 1065, "hive.compat", v, "Enable (configurable) deprecated behaviors by setting desired level of backward compatibility.\nSetting to 0.12:\n  Maintains division behavior: int / int = double");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ", 1066, "hive.convert.join.bucket.mapjoin.tez", v, "Whether joins can be automatically converted to bucket map joins in hive \nwhen tez is used as the execution engine.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TEZ_BMJ_USE_SUBCACHE", 1067, "hive.tez.bmj.use.subcache", v, "Use subcache to reuse hashtable across multiple tasks");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_BMJ_USE_SUBCACHE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CHECK_CROSS_PRODUCT", 1068, "hive.exec.check.crossproducts", v, "Check if a plan contains a Cross Product. If there is one, output a warning to the Session\'s console.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CHECK_CROSS_PRODUCT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL", 1069, "hive.localize.resource.wait.interval", "5000ms", v, "Time to wait for another thread to localize the same resource for hive-tez.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL> = v;
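/* Illustrative sketch, not part of the decompiled output: time-typed ConfVars carry a
   unit suffix in their default ("5000ms" above) and are read back through getTimeVar
   with an explicit output unit:

     import java.util.concurrent.TimeUnit;

     HiveConf conf = new HiveConf();
     long waitMs = conf.getTimeVar(
         HiveConf.ConfVars.HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL, TimeUnit.MILLISECONDS);
     conf.setTimeVar(
         HiveConf.ConfVars.HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL, 10, TimeUnit.SECONDS);
*/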
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(5);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS", 1070, "hive.localize.resource.num.wait.attempts", v, "The number of attempts waiting for localizing a resource in hive-tez.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_AUTO_REDUCER_PARALLELISM", 1071, "hive.tez.auto.reducer.parallelism", v, "Turn on Tez\' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes\nand set parallelism estimates. Tez will sample source vertices\' output sizes and adjust the estimates at runtime as\nnecessary.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_AUTO_REDUCER_PARALLELISM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.33F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR", 1072, "hive.tez.llap.min.reducer.per.executor", v, "If above 0, the min number of reducers for auto-parallelism for LLAP scheduling will\nbe set to this fraction of the number of executors.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_MAPREDUCE_OUTPUT_COMMITTER", 1073, "hive.tez.mapreduce.output.committer.class", "org.apache.tez.mapreduce.committer.MROutputCommitter", "Output committer class which should be invoked at the setup/commit lifecycle points of vertex executions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAPREDUCE_OUTPUT_COMMITTER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(2.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_MAX_PARTITION_FACTOR", 1074, "hive.tez.max.partition.factor", v, "When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAX_PARTITION_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.25F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_MIN_PARTITION_FACTOR", 1075, "hive.tez.min.partition.factor", v, "When auto reducer parallelism is enabled this factor will be used to put a lower limit to the number\nof reducers that tez specifies.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MIN_PARTITION_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_OPTIMIZE_BUCKET_PRUNING", 1076, "hive.tez.bucket.pruning", v, "When pruning is enabled, filters on bucket columns will be processed by \nfiltering the splits against a bitset of included buckets. This needs predicates \nproduced by hive.optimize.ppd and hive.optimize.index.filters.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_OPTIMIZE_BUCKET_PRUNING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT", 1077, "hive.tez.bucket.pruning.compat", v, "When pruning is enabled, handle possibly broken inserts due to negative hashcodes.\nThis occasionally doubles the data scan cost, but is default enabled for safety");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_PARTITION_PRUNING", 1078, "hive.tez.dynamic.partition.pruning", v, "When dynamic pruning is enabled, joins on partition keys will be processed by sending\nevents from the processing vertices to the Tez application master. These events will be\nused to prune unnecessary partitions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED", 1079, "hive.tez.dynamic.partition.pruning.extended", v, "Whether we should try to create additional opportunities for dynamic pruning, e.g., considering\nsiblings that may not be created by normal dynamic pruning logic.\nOnly works when dynamic pruning is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1048576L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE", 1080, "hive.tez.dynamic.partition.pruning.max.event.size", v, "Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(104857600L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE", 1081, "hive.tez.dynamic.partition.pruning.max.data.size", v, "Maximum total data size of events in dynamic pruning.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE> = v;
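// Long-valued variables such as the two dynamic-pruning size limits above are read with
// getLongVar; a sketch under the same assumption about the HiveConf accessors (given a
// HiveConf conf as above):
//   long maxEventSize = conf.getLongVar(HiveConf.ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE); // default 1048576L
//   long maxDataSize = conf.getLongVar(HiveConf.ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE);   // default 104857600L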
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_SEMIJOIN_REDUCTION", 1082, "hive.tez.dynamic.semijoin.reduction", v, "When dynamic semijoin is enabled, shuffle joins will perform a leaky semijoin before shuffle. This requires hive.tez.dynamic.partition.pruning to be enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_MIN_BLOOM_FILTER_ENTRIES", 1083, "hive.tez.min.bloom.filter.entries", v, "Bloom filter should have at least this many entries to be effective");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MIN_BLOOM_FILTER_ENTRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(100000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_MAX_BLOOM_FILTER_ENTRIES", 1084, "hive.tez.max.bloom.filter.entries", v, "Bloom filter should have at most this many entries to be effective");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_MAX_BLOOM_FILTER_ENTRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_BLOOM_FILTER_FACTOR", 1085, "hive.tez.bloom.filter.factor", v, "Bloom filter should be a multiple of this factor with nDV");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BLOOM_FILTER_FACTOR> = v;
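// Float-valued variables follow the same pattern via getFloatVar (assumed accessor,
// given a HiveConf conf as above):
//   float bloomFactor = conf.getFloatVar(HiveConf.ConfVars.TEZ_BLOOM_FILTER_FACTOR); // default 1.0f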
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_BLOOM_FILTER_MERGE_THREADS", 1086, "hive.tez.bloom.filter.merge.threads", v, "How many threads are used for merging bloom filters in addition to the task\'s main thread?\n-1: sanity check, it will fail if execution hits the bloom filter merge codepath\n 0: feature is disabled, use only the task\'s main thread for bloom filter merging\n 1: recommended value: there is only 1 merger thread (in addition to the task\'s main thread); according to perf tests, this can lead to serious improvement\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BLOOM_FILTER_MERGE_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(100000000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION", 1087, "hive.tez.bigtable.minsize.semijoin.reduction", v, "Big table for runtime filtering should be at least this size");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.5F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD", 1088, "hive.tez.dynamic.semijoin.reduction.threshold", v, "Only perform semijoin optimization if the estimated benefit is at or above this fraction of the target table");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_SEMIJOIN_REDUCTION_MULTICOLUMN", 1089, "hive.tez.dynamic.semijoin.reduction.multicolumn", v, "Whether to consider multicolumn semijoin reducers or not.\nThis should always be set to true. Since it is a new feature, it has been made configurable.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_MULTICOLUMN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN", 1090, "hive.tez.dynamic.semijoin.reduction.for.mapjoin", v, "Use a semi-join branch for map-joins. This may not make it faster, but is helpful in certain join patterns.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_DPP_FACTOR", 1091, "hive.tez.dynamic.semijoin.reduction.for.dpp.factor", v, "The factor to decide if semijoin branch feeds into a TableScan\nwhich has an outgoing Dynamic Partition Pruning (DPP) branch based on number of distinct values.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_DPP_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.5F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_SMB_NUMBER_WAVES", 1092, "hive.tez.smb.number.waves", v, "The number of waves in which to run the SMB join. This accounts for the cluster being occupied. Ideally it should be 1 wave.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SMB_NUMBER_WAVES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_EXEC_SUMMARY", 1093, "hive.tez.exec.print.summary", v, "Display breakdown of execution steps, for every query executed by the shell.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_EXEC_SUMMARY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "text";
v[2] = "json";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("TEZ_SESSION_EVENTS_SUMMARY", 1094, "hive.tez.session.events.print.summary", "none", v, "Display a summary of all tez session related events in text or json format");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SESSION_EVENTS_SUMMARY> = v;
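// Variables built with a Validator$StringSet, like the one above, only admit the listed
// values; a hedged sketch of the string get/set round trip (assumed accessors, given a
// HiveConf conf as above):
//   conf.setVar(HiveConf.ConfVars.TEZ_SESSION_EVENTS_SUMMARY, "json"); // one of "none", "text", "json"
//   String summaryFormat = conf.getVar(HiveConf.ConfVars.TEZ_SESSION_EVENTS_SUMMARY);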
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_EXEC_INPLACE_PROGRESS", 1095, "hive.tez.exec.inplace.progress", v, "Updates tez job execution progress in-place in the terminal when hive-cli is used.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_EXEC_INPLACE_PROGRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_INPLACE_PROGRESS", 1096, "hive.server.in.place.progress", v, "Allows hive server 2 to send progress bar update information. This is currently available only if the execution engine is tez.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_INPLACE_PROGRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("TEZ_DAG_STATUS_CHECK_INTERVAL", 1097, "hive.tez.dag.status.check.interval", "500ms", v, "Interval between subsequent DAG status invocation.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_DAG_STATUS_CHECK_INTERVAL> = v;
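// Time-typed variables pair a Validator$TimeValidator with a unit-suffixed default
// ("500ms" here); getTimeVar converts to the caller's unit (assumed standard API):
//   long intervalMs = conf.getTimeVar(HiveConf.ConfVars.TEZ_DAG_STATUS_CHECK_INTERVAL,
//       java.util.concurrent.TimeUnit.MILLISECONDS); // 500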
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.8F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_CONTAINER_MAX_JAVA_HEAP_FRACTION", 1098, "hive.tez.container.max.java.heap.fraction", v, "This is to override the tez setting with the same name");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_CONTAINER_MAX_JAVA_HEAP_FRACTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.3F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MIN", 1099, "hive.tez.task.scale.memory.reserve-fraction.min", v, "This is to override the tez setting tez.task.scale.memory.reserve-fraction");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.5F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MAX", 1100, "hive.tez.task.scale.memory.reserve.fraction.max", v, "The maximum fraction of JVM memory which Tez will reserve for the processor");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MAX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(-1.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION", 1101, "hive.tez.task.scale.memory.reserve.fraction", v, "The customized fraction of JVM memory which Tez will reserve for the processor");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED", 1102, "hive.tez.cartesian-product.enabled", v, "Use Tez cartesian product edge to speed up cross product");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB", 1103, "hive.tez.unordered.output.buffer.size.mb", v, "When we have an operation that does not need a large buffer, we use this buffer size for simple custom edge.\nValue is an integer. Default value is -1, which means that we will estimate this value from operators in the plan.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ENABLED", 1104, "hive.llap.io.enabled", null, "Whether the LLAP IO layer is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_CACHE_ONLY", 1105, "hive.llap.io.cache.only", v, "Whether the query should read from cache only. If set to true and a cache miss happens during the read, an exception will occur. Primarily used for testing.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CACHE_ONLY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ROW_WRAPPER_ENABLED", 1106, "hive.llap.io.row.wrapper.enabled", v, "Whether the LLAP IO row wrapper is enabled for non-vectorized queries.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ROW_WRAPPER_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ACID_ENABLED", 1107, "hive.llap.io.acid", v, "Whether the LLAP IO layer is enabled for ACID.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ACID_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(2147483647L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>(java.lang.Long,boolean,java.lang.Long,boolean)>(v, 1, v, 0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_IO_TRACE_SIZE", 1108, "hive.llap.io.trace.size", "2Mb", v, "The buffer size for a per-fragment LLAP debug trace. 0 to disable.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_TRACE_SIZE> = v;
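// Size-typed variables ("2Mb" here) pair a Validator$SizeValidator with getSizeVar,
// which resolves the human-readable default to bytes (assumed accessor):
//   long traceBytes = conf.getSizeVar(HiveConf.ConfVars.LLAP_IO_TRACE_SIZE); // 2Mb -> 2097152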
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_TRACE_ALWAYS_DUMP", 1109, "hive.llap.io.trace.always.dump", v, "Whether to always dump the LLAP IO trace (if enabled); the default is on error.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_TRACE_ALWAYS_DUMP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_NONVECTOR_WRAPPER_ENABLED", 1110, "hive.llap.io.nonvector.wrapper.enabled", v, "Whether the LLAP IO layer is enabled for non-vectorized queries that read inputs\nthat can be vectorized");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_NONVECTOR_WRAPPER_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[2];
v[0] = "cache";
v[1] = "none";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_IO_MEMORY_MODE", 1111, "hive.llap.io.memory.mode", "cache", v, "LLAP IO memory usage; \'cache\' (the default) uses data and metadata cache with a\ncustom off-heap allocator, \'none\' doesn\'t use either (this mode may result in\nsignificant performance degradation)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_MEMORY_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_ALLOCATOR_MIN_ALLOC", 1112, "hive.llap.io.allocator.alloc.min", "4Kb", v, "Minimum allocation possible from LLAP buddy allocator. Allocations below that are\npadded to minimum allocation. For ORC, should generally be the same as the expected\ncompression buffer size, or next lowest power of 2. Must be a power of 2.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MIN_ALLOC> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_ALLOCATOR_MAX_ALLOC", 1113, "hive.llap.io.allocator.alloc.max", "16Mb", v, "Maximum allocation possible from LLAP buddy allocator. For ORC, should be as large as\nthe largest expected ORC compression buffer size. Must be a power of 2.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAX_ALLOC> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(8);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ALLOCATOR_ARENA_COUNT", 1114, "hive.llap.io.allocator.arena.count", v, "Arena count for LLAP low-level cache; cache will be allocated in the steps of\n(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\nnot the case, an adjusted size will be used. Using powers of 2 is recommended.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_ARENA_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_IO_MEMORY_MAX_SIZE", 1115, "hive.llap.io.memory.size", "1Gb", v, "Maximum size for IO allocator or ORC low-level cache.", "hive.llap.io.cache.orc.size");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_MEMORY_MAX_SIZE> = v;
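// The constructor overload above carries one extra trailing String; judging by the
// values, "hive.llap.io.cache.orc.size" is the older property name retained as an
// alias for "hive.llap.io.memory.size" (an assumption about this overload, not
// confirmed by the dump). Reading it uses the usual size accessor:
//   long ioMemory = conf.getSizeVar(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE); // default 1Gb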
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ALLOCATOR_DIRECT", 1116, "hive.llap.io.allocator.direct", v, "Whether ORC low-level cache should use direct allocation.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DIRECT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ALLOCATOR_PREALLOCATE", 1117, "hive.llap.io.allocator.preallocate", v, "Whether to preallocate the entire IO memory at init time.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_PREALLOCATE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ALLOCATOR_MAPPED", 1118, "hive.llap.io.allocator.mmap", v, "Whether ORC low-level cache should use memory mapped allocation (direct I/O). \nThis is recommended to be used along-side NVDIMM (DAX) or NVMe flash storage.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAPPED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$WritableDirectoryValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$WritableDirectoryValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_ALLOCATOR_MAPPED_PATH", 1119, "hive.llap.io.allocator.mmap.path", "/tmp", v, "The directory location for mapping NVDIMM/NVMe flash storage into the ORC low-level cache.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAPPED_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "freelist";
v[1] = "brute";
v[2] = "both";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_ALLOCATOR_DISCARD_METHOD", 1120, "hive.llap.io.allocator.discard.method", "both", v, "Which method to use to force-evict blocks to deal with fragmentation:\nfreelist - use half-size free list (discards less, but also less reliable); brute -\nbrute force, discard whatever we can; both - first try free list, then brute force.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DISCARD_METHOD> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ALLOCATOR_DEFRAG_HEADROOM", 1121, "hive.llap.io.allocator.defrag.headroom", "1Mb", "How much of a headroom to leave to allow allocator more flexibility to defragment.\nThe allocator would further cap it to a fraction of total memory.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_DEFRAG_HEADROOM> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ALLOCATOR_MAX_FORCE_EVICTED", 1122, "hive.llap.io.allocator.max.force.eviction", "16Mb", "Fragmentation can lead to some cases where more eviction has to happen to accommodate allocations.\nThis configuration puts a limit on how many bytes to force-evict before using the Allocator Discard method. Higher values will allow the allocator more flexibility and will lead to better caching.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAX_FORCE_EVICTED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TRACK_CACHE_USAGE", 1123, "hive.llap.io.track.cache.usage", v, "Whether to tag LLAP cache contents, mapping them to Hive entities (paths for\npartitions and tables) for reporting.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TRACK_CACHE_USAGE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_USE_LRFU", 1124, "hive.llap.io.use.lrfu", v, "Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_USE_LRFU> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.1F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_LRFU_LAMBDA", 1125, "hive.llap.io.lrfu.lambda", v, "Lambda for ORC low-level cache LRFU cache policy. Must be in [0, 1]. 0 makes LRFU\nbehave like LFU, 1 makes it behave like LRU, values in between balance accordingly.\nThe meaning of this parameter is the inverse of the number of time ticks (cache\n operations, currently) that cause the combined recency-frequency of a block in cache\n to be halved.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_LAMBDA> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.1F);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.0F);
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_LRFU_HOTBUFFERS_PERCENTAGE", 1126, "hive.llap.io.lrfu.hotbuffers.percentage", v, v, "The number specifies the percentage of the cached buffers which are considered the most important ones based on the policy.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_HOTBUFFERS_PERCENTAGE> = v;
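// The Validator$RangeValidator above constrains the value to [0.0, 1.0]; a hedged
// sketch of consulting a validator directly (getValidator() and a validate(String)
// signature returning an error message or null are assumptions, not shown in this dump):
//   org.apache.hadoop.hive.conf.Validator range =
//       HiveConf.ConfVars.LLAP_LRFU_HOTBUFFERS_PERCENTAGE.getValidator();
//   String err = range.validate("1.5"); // expected non-null: 1.5 is outside [0.0, 1.0]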
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(64);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_LRFU_BP_WRAPPER_SIZE", 1127, "hive.llap.io.lrfu.bp.wrapper.size", v, "Thread-local queue used to amortize lock contention; the idea here is to try locking as soon as we reach max size / 2 and to block when max queue size is reached");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_LRFU_BP_WRAPPER_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_CACHE_ALLOW_SYNTHETIC_FILEID", 1128, "hive.llap.cache.allow.synthetic.fileid", v, "Whether LLAP cache should use synthetic file ID if real one is not available. Systems\nlike HDFS, Isilon, etc. provide a unique file/inode ID. On other FSes (e.g. local\nFS), the cache would not work by default because LLAP is unable to uniquely track the\nfiles; enabling this setting allows LLAP to generate file ID from the path, size and\nmodification time, which is almost certain to identify file uniquely. However, if you\nuse a FS without file IDs and rewrite files a lot (or are paranoid), you might want\nto avoid this setting.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_ALLOW_SYNTHETIC_FILEID> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_CACHE_DEFAULT_FS_FILE_ID", 1129, "hive.llap.cache.defaultfs.only.native.fileid", v, "Whether LLAP cache should use native file IDs from the default FS only. This is to\navoid file ID collisions when several different DFS instances are in use at the same\ntime. Disable this check to allow native file IDs from non-default DFS.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_DEFAULT_FS_FILE_ID> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_CACHE_ENABLE_ORC_GAP_CACHE", 1130, "hive.llap.orc.gap.cache", v, "Whether LLAP cache for ORC should remember gaps in ORC compression buffer read\nestimates, to avoid re-reading the data that was read once and discarded because it\nis unneeded. This is only necessary for ORC files written before HIVE-9660.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_ENABLE_ORC_GAP_CACHE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_CACHE_HYDRATION_STRATEGY_CLASS", 1131, "hive.llap.cache.hydration.strategy.class", "", "Strategy class for managing the llap cache hydration. It\'s executed when the daemon starts and stops, and gives a chance to save and/or load the contents of the llap cache. If left empty the feature is disabled.\nThe class should implement org.apache.hadoop.hive.llap.LlapCacheHydration interface.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_HYDRATION_STRATEGY_CLASS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_CACHE_HYDRATION_SAVE_DIR", 1132, "hive.llap.cache.hydration.save.dir", "/tmp/hive", "Directory to save the llap cache content\ninfo on shutdown, if BasicLlapCacheHydration is used as the hive.llap.cache.hydration.strategy.class.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CACHE_HYDRATION_SAVE_DIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_USE_FILEID_PATH", 1133, "hive.llap.io.use.fileid.path", v, "Whether LLAP should use fileId (inode)-based path to ensure better consistency for the\ncases of file overwrites. This is supported on HDFS. Disabling this also turns off any\ncache consistency checks based on fileid comparisons.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_USE_FILEID_PATH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ENCODE_ENABLED", 1134, "hive.llap.io.encode.enabled", v, "Whether LLAP should try to re-encode and cache data for non-ORC formats. This is used\non LLAP Server side to determine if the infrastructure for that is initialized.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ENCODE_FORMATS", 1135, "hive.llap.io.encode.formats", "org.apache.hadoop.mapred.TextInputFormat,", "The table input formats for which LLAP IO should re-encode and cache data.\nComma-separated list.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_FORMATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MIN_ALLOC>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOCATOR_MAX_ALLOC>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (org.apache.hadoop.hive.conf.HiveConf$ConfVars,org.apache.hadoop.hive.conf.HiveConf$ConfVars)>(v, v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("Allocation size for the buffers used to cache encoded data from non-ORC files. Must\nbe a power of two between \u0001 and\n\u0001.");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_IO_ENCODE_ALLOC_SIZE", 1136, "hive.llap.io.encode.alloc.size", "256Kb", v, v);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_ALLOC_SIZE> = v;
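// The dynamicinvoke above is javac's indified string concatenation: the recipe string
// handed to StringConcatFactory.makeConcatWithConstants marks each dynamic argument
// with a '\u0001' placeholder, so the two ConfVars (LLAP_ALLOCATOR_MIN_ALLOC and
// LLAP_ALLOCATOR_MAX_ALLOC) are interpolated into the description at those positions.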
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ENCODE_VECTOR_SERDE_ENABLED", 1137, "hive.llap.io.encode.vector.serde.enabled", v, "Whether LLAP should use vectorized SerDe reader to read text data when re-encoding.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_VECTOR_SERDE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ENCODE_VECTOR_SERDE_ASYNC_ENABLED", 1138, "hive.llap.io.encode.vector.serde.async.enabled", v, "Whether LLAP should use async mode in vectorized SerDe reader to read text data.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_VECTOR_SERDE_ASYNC_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ENCODE_SLICE_ROW_COUNT", 1139, "hive.llap.io.encode.slice.row.count", v, "Row count to use to separate cache slices when reading encoded data from row-based\ninputs into LLAP cache, if this feature is enabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_SLICE_ROW_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ENCODE_SLICE_LRR", 1140, "hive.llap.io.encode.slice.lrr", v, "Whether to separate cache slices when reading encoded data from text inputs via\nMR LineRecordReader into LLAP cache, if this feature is enabled. Safety flag.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_SLICE_LRR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ORC_ENABLE_TIME_COUNTERS", 1141, "hive.llap.io.orc.time.counters", v, "Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ORC_ENABLE_TIME_COUNTERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) 50000;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_VRB_QUEUE_LIMIT_MAX", 1142, "hive.llap.io.vrb.queue.limit.max", v, "The maximum queue size for VRBs produced by a LLAP IO thread when the processing is\nslower than the IO. The actual queue size is set per fragment, and is adjusted down\nfrom the base, depending on the schema; see LLAP_IO_CVB_BUFFERED_SIZE.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_VRB_QUEUE_LIMIT_MAX> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_VRB_QUEUE_LIMIT_MIN", 1143, "hive.llap.io.vrb.queue.limit.min", v, "The minimum queue size for VRBs produced by a LLAP IO thread when the processing is\nslower than the IO (used when determining the size from base size).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_VRB_QUEUE_LIMIT_MIN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1073741824L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_CVB_BUFFERED_SIZE", 1144, "hive.llap.io.cvb.memory.consumption.", v, "The number of bytes used to buffer CVBs between IO and processor threads; defaults to 1GB. This will be used to compute a best-effort queue size for VRBs produced by a LLAP IO thread.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CVB_BUFFERED_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_PROACTIVE_EVICTION_ENABLED", 1145, "hive.llap.io.proactive.eviction.enabled", v, "If true, proactive cache eviction is enabled: LLAP will proactively evict buffers that belong to dropped Hive entities (DBs, tables, partitions, or temp tables).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_IO_PROACTIVE_EVICTION_SWEEP_INTERVAL", 1146, "hive.llap.io.proactive.eviction.sweep.interval", "5s", v, "How frequently (in seconds) LLAP should check for buffers marked for proactive eviction and proceed with their eviction.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_SWEEP_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_PROACTIVE_EVICTION_INSTANT_DEALLOC", 1147, "hive.llap.io.proactive.eviction.instant.dealloc", v, "Experimental feature: when set to true, buffer deallocation will happen as soon as proactive eviction notifications are received by the daemon. Sweep phase of proactive eviction will only do the cache policy cleanup in this case. This can increase cache hit ratio but might scale bad in a workload that generates many proactive eviction events.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PROACTIVE_EVICTION_INSTANT_DEALLOC> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "metadata";
v[2] = "all";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_IO_CACHE_DELETEDELTAS", 1148, "hive.llap.io.cache.deletedeltas", "all", v, "When set to \'all\' queries that use LLAP IO for execution will also access delete delta files via LLAP IO layer and thus they will be fully cached. When set to \'metadata\', only the tail of delete deltas will be cached. If set to \'none\', only the base files and insert deltas will be channeled through LLAP, while delete deltas will be accessed directly from their configured FS without caching them. This feature only works with ColumnizedDeleteEventRegistry, SortMergedDeleteEventRegistry is not supported.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_CACHE_DELETEDELTAS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_IO_PATH_CACHE_SIZE", 1149, "hive.llap.io.path.cache.size", "10Mb", v, "The maximum amount of memory allowed for storing file paths.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_PATH_CACHE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_SHARE_OBJECT_POOLS", 1150, "hive.llap.io.share.object.pools", v, "Whether to use shared object pools in LLAP IO. A safety flag.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_SHARE_OBJECT_POOLS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_AUTO_ALLOW_UBER", 1151, "hive.llap.auto.allow.uber", v, "Whether or not to allow the planner to run vertices in the AM.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ALLOW_UBER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_AUTO_ENFORCE_TREE", 1152, "hive.llap.auto.enforce.tree", v, "Enforce that all parents are in llap, before considering vertex");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_TREE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_AUTO_ENFORCE_VECTORIZED", 1153, "hive.llap.auto.enforce.vectorized", v, "Enforce that inputs are vectorized, before considering vertex");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_VECTORIZED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_AUTO_ENFORCE_STATS", 1154, "hive.llap.auto.enforce.stats", v, "Enforce that col stats are available, before considering vertex");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_ENFORCE_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(10737418240L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_AUTO_MAX_INPUT", 1155, "hive.llap.auto.max.input.size", v, "Check input size, before considering vertex (-1 disables check)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_MAX_INPUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1073741824L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_AUTO_MAX_OUTPUT", 1156, "hive.llap.auto.max.output.size", v, "Check output size, before considering vertex (-1 disables check)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_AUTO_MAX_OUTPUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_SKIP_COMPILE_UDF_CHECK", 1157, "hive.llap.skip.compile.udf.check", v, "Whether to skip the compile-time check for non-built-in UDFs when deciding whether to\nexecute tasks in LLAP. Skipping the check allows executing UDFs from pre-localized\njars in LLAP; if the jars are not pre-localized, the UDFs will simply fail to load.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SKIP_COMPILE_UDF_CHECK> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ALLOW_PERMANENT_FNS", 1158, "hive.llap.allow.permanent.fns", v, "Whether LLAP decider should allow permanent UDFs.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ALLOW_PERMANENT_FNS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[5];
v[0] = "auto";
v[1] = "none";
v[2] = "all";
v[3] = "map";
v[4] = "only";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_EXECUTION_MODE", 1159, "hive.llap.execution.mode", "none", v, "Chooses whether query fragments will run in container or in llap");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXECUTION_MODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "none";
v[1] = "encode";
v[2] = "all";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_IO_ETL_SKIP_FORMAT", 1160, "hive.llap.io.etl.skip.format", "encode", v, "For ETL queries, determines whether to skip llap io cache. By default, hive.llap.io.encode.enabled will be set to false, which disables LLAP IO for text formats. Setting it to \'all\' will disable LLAP IO for all formats. \'none\' will not disable LLAP IO for any formats.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ETL_SKIP_FORMAT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_OBJECT_CACHE_ENABLED", 1161, "hive.llap.object.cache.enabled", v, "Cache objects (plans, hashtables, etc) in llap");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_OBJECT_CACHE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS", 1162, "hive.llap.io.decoding.metrics.percentiles.intervals", "30", "Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\nfor percentile latency metrics on the LLAP daemon IO decoding time.\nSee also hive.llap.queue.metrics.percentiles.intervals.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_THREADPOOL_SIZE", 1163, "hive.llap.io.threadpool.size", v, "Specify the number of threads to use for low-level IO thread pool.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_THREADPOOL_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_IO_ENCODE_THREADPOOL_MULTIPLIER", 1164, "hive.llap.io.encode.threadpool.multiplier", v, "Used to determine the size of the IO encode threadpool by multiplying hive.llap.io.threadpool.size with this value. During text table reads, a thread from the \'regular\' IO thread pool may place a number of encode tasks on the threads in the encode pool.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_IO_ENCODE_THREADPOOL_MULTIPLIER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_USE_KERBEROS", 1165, "hive.llap.kerberos.enabled", v, "Whether LLAP is configured for Kerberos authentication. This could be useful when the cluster\nis kerberized, but LLAP is not.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_USE_KERBEROS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_KERBEROS_PRINCIPAL", 1166, "hive.llap.daemon.service.principal", "", "The name of the LLAP daemon\'s service principal.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_KERBEROS_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_KERBEROS_KEYTAB_FILE", 1167, "hive.llap.daemon.keytab.file", "", "The path to the Kerberos Keytab file containing the LLAP daemon\'s service principal.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_KERBEROS_KEYTAB_FILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_WEBUI_SPNEGO_KEYTAB_FILE", 1168, "hive.llap.webui.spnego.keytab", "", "The path to the Kerberos Keytab file containing the LLAP WebUI SPNEGO principal.\nTypical value would look like /etc/security/keytabs/spnego.service.keytab.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEBUI_SPNEGO_KEYTAB_FILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_WEBUI_SPNEGO_PRINCIPAL", 1169, "hive.llap.webui.spnego.principal", "", "The LLAP WebUI SPNEGO service principal. Configured similarly to\nhive.server2.webui.spnego.principal.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEBUI_SPNEGO_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_FS_KERBEROS_PRINCIPAL", 1170, "hive.llap.task.principal", "", "The name of the principal to use to run tasks. By default, the clients are required\nto provide tokens to access HDFS/etc.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FS_KERBEROS_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_FS_KERBEROS_KEYTAB_FILE", 1171, "hive.llap.task.keytab.file", "", "The path to the Kerberos Keytab file containing the principal to use to run tasks.\nBy default, the clients are required to provide tokens to access HDFS/etc.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FS_KERBEROS_KEYTAB_FILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ZKSM_ZK_CONNECTION_STRING", 1172, "hive.llap.zk.sm.connectionString", "", "ZooKeeper connection string for ZooKeeper SecretManager.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZKSM_ZK_CONNECTION_STRING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_ZKSM_ZK_SESSION_TIMEOUT", 1173, "hive.llap.zk.sm.session.timeout", "40s", v, "ZooKeeper session timeout for ZK SecretManager.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZKSM_ZK_SESSION_TIMEOUT> = v;
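/* Illustrative sketch (mine): time-typed ConfVars such as the session timeout
   above take a unit suffix ("40s") and are read back in whatever unit the caller
   requests via the standard getTimeVar accessor.

   import java.util.concurrent.TimeUnit;
   import org.apache.hadoop.hive.conf.HiveConf;

   public class ZkSessionTimeoutExample {
     public static void main(String[] args) {
       HiveConf conf = new HiveConf();
       long ms = conf.getTimeVar(
           HiveConf.ConfVars.LLAP_ZKSM_ZK_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
       System.out.println("ZK SecretManager session timeout: " + ms + " ms"); // 40000 by default
     }
   }
*/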
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ZK_REGISTRY_USER", 1174, "hive.llap.zk.registry.user", "", "In the LLAP ZooKeeper-based registry, specifies the username in the Zookeeper path.\nThis should be the hive user or whichever user is running the LLAP daemon.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZK_REGISTRY_USER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ZK_REGISTRY_NAMESPACE", 1175, "hive.llap.zk.registry.namespace", null, "In the LLAP ZooKeeper-based registry, overrides the ZK path namespace. Note that\nusing this makes the path management (e.g. setting correct ACLs) your responsibility.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ZK_REGISTRY_NAMESPACE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_SECURITY_ACL", 1176, "hive.llap.daemon.acl", "*", "The ACL for LLAP daemon.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SECURITY_ACL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_SECURITY_ACL_DENY", 1177, "hive.llap.daemon.acl.blocked", "", "The deny ACL for LLAP daemon.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SECURITY_ACL_DENY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_MANAGEMENT_ACL", 1178, "hive.llap.management.acl", "*", "The ACL for LLAP daemon management.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_ACL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_MANAGEMENT_ACL_DENY", 1179, "hive.llap.management.acl.blocked", "", "The deny ACL for LLAP daemon management.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_ACL_DENY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_PLUGIN_ACL", 1180, "hive.llap.plugin.acl", "*", "The ACL for LLAP plugin AM endpoint.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_ACL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_PLUGIN_ACL_DENY", 1181, "hive.llap.plugin.acl.blocked", "", "The deny ACL for LLAP plugin AM endpoint.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_ACL_DENY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "false";
v[1] = "except_llap_owner";
v[2] = "true";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_REMOTE_TOKEN_REQUIRES_SIGNING", 1182, "hive.llap.remote.token.requires.signing", "true", v, "Whether the token returned from the LLAP management API should require fragment signing.\nTrue by default; can be disabled to allow CLI to get tokens from LLAP in a secure\ncluster by setting it to \'false\' or \'except_llap_owner\' (the latter returns such tokens\nto everyone except the user the LLAP cluster is authenticating under).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_REMOTE_TOKEN_REQUIRES_SIGNING> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_DELEGATION_TOKEN_LIFETIME", 1183, "hive.llap.daemon.delegation.token.lifetime", "14d", v, "LLAP delegation token lifetime, in seconds if specified without a unit.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DELEGATION_TOKEN_LIFETIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(15004);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_MANAGEMENT_RPC_PORT", 1184, "hive.llap.management.rpc.port", v, "RPC port for LLAP daemon management service.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MANAGEMENT_RPC_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_WEB_AUTO_AUTH", 1185, "hive.llap.auto.auth", v, "Whether or not to set Hadoop configs to enable auth in LLAP web app.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_WEB_AUTO_AUTH> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(5);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_RPC_NUM_HANDLERS", 1186, "hive.llap.daemon.rpc.num.handlers", v, "Number of RPC handlers for LLAP daemon.", "llap.daemon.rpc.num.handlers");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_RPC_NUM_HANDLERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_PLUGIN_RPC_PORT", 1187, "hive.llap.plugin.rpc.port", v, "Port to use for the LLAP plugin RPC server.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_RPC_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_PLUGIN_RPC_NUM_HANDLERS", 1188, "hive.llap.plugin.rpc.num.handlers", v, "Number of RPC handlers for AM LLAP plugin endpoint.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_RPC_NUM_HANDLERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_HDFS_PACKAGE_DIR", 1189, "hive.llap.hdfs.package.dir", ".yarn", "Package directory on HDFS used for holding collected configuration and libraries required for YARN launch. Note: this should be set to the same value as yarn.service.base.path.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_HDFS_PACKAGE_DIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_WORK_DIRS", 1190, "hive.llap.daemon.work.dirs", "", "Working directories for the daemon. This should not be set if running as a YARN\nService. It must be set when not running on YARN. If the value is set when\nrunning as a YARN Service, the specified value will be used.", "llap.daemon.work.dirs");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WORK_DIRS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(15551);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_YARN_SHUFFLE_PORT", 1191, "hive.llap.daemon.yarn.shuffle.port", v, "YARN shuffle port for LLAP-daemon-hosted shuffle.", "llap.daemon.yarn.shuffle.port");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_YARN_SHUFFLE_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_YARN_CONTAINER_MB", 1192, "hive.llap.daemon.yarn.container.mb", v, "LLAP server YARN container size in MB. Used in LlapServiceDriver and package.py.", "llap.daemon.yarn.container.mb");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_YARN_CONTAINER_MB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_QUEUE_NAME", 1193, "hive.llap.daemon.queue.name", null, "Queue name within which the LLAP application will run. Used in LlapServiceDriver and package.py.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_QUEUE_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_CONTAINER_ID", 1194, "hive.llap.daemon.container.id", null, "ContainerId of a running LlapDaemon. Used to publish to the registry");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_CONTAINER_ID> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_NM_ADDRESS", 1195, "hive.llap.daemon.nm.address", null, "NM Address host:rpcPort for the NodeManager on which the instance of the daemon is running.\nPublished to the LLAP registry. Should never be set by users.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NM_ADDRESS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED", 1196, "hive.llap.daemon.shuffle.dir.watcher.enabled", v, "Whether to enable the watcher for the LLAP daemon shuffle directories.", "llap.daemon.shuffle.dir-watcher.enabled");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS", 1197, "hive.llap.daemon.am.liveness.heartbeat.interval.ms", "10000ms", v, "Tez AM-LLAP heartbeat interval (milliseconds). This needs to be below the task timeout\ninterval, but otherwise as high as possible to avoid unnecessary traffic.", "llap.daemon.am.liveness.heartbeat.interval-ms");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS", 1198, "hive.llap.am.liveness.connection.timeout.ms", "10000ms", v, "Amount of time to wait on connection failures to the AM from an LLAP daemon before\nconsidering the AM to be dead.", "llap.am.liveness.connection.timeout-millis");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_AM_USE_FQDN", 1199, "hive.llap.am.use.fqdn", v, "Whether to use FQDN of the AM machine when submitting work to LLAP.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_USE_FQDN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_EXEC_USE_FQDN", 1200, "hive.llap.exec.use.fqdn", v, "On non-kerberized clusters where hostnames are stable but IP addresses change, setting this config\nto false will use the IP address of the LLAP daemon in the execution context instead of the FQDN.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_EXEC_USE_FQDN> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS", 1201, "hive.llap.am.liveness.connection.sleep.between.retries.ms", "2000ms", v, "Sleep duration while waiting to retry connection failures to the AM from the daemon for\nthe general keep-alive thread (milliseconds).", "llap.am.liveness.connection.sleep-between-retries-millis");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS", 1202, "hive.llap.task.scheduler.timeout.seconds", "60s", v, "Amount of time to wait before failing the query when there are no llap daemons running\n(alive) in the cluster.", "llap.daemon.scheduler.timeout.seconds");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_NUM_EXECUTORS", 1203, "hive.llap.daemon.num.executors", v, "Number of executors to use in LLAP daemon; essentially, the number of tasks that can be\nexecuted in parallel.", "llap.daemon.num.executors");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NUM_EXECUTORS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(0.2F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR", 1204, "hive.llap.mapjoin.memory.oversubscribe.factor", v, "Fraction of memory from hive.auto.convert.join.noconditionaltask.size that can be oversubscribed\nby queries running in LLAP mode. This factor has to be between 0.0 and 1.0. The default is 20% oversubscription.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY", 1205, "hive.llap.memory.oversubscription.max.executors.per.query", v, "Used along with hive.llap.mapjoin.memory.oversubscribe.factor to limit the number of executors from\nwhich memory for mapjoin can be borrowed. For example, with a value of 3, the\nhive.llap.mapjoin.memory.oversubscribe.factor amount of memory can be borrowed from 3 other executors,\nbased on which the mapjoin conversion decision will be made. This is only an upper bound; the lower bound\nis determined by the number of executors and the configured max concurrency.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY> = v;
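/* Worked example of the oversubscription arithmetic described by the two settings
   above (my interpretation; the sizes and names are hypothetical): each of up to
   max.executors.per.query donor executors can lend factor * noconditionaltask.size
   bytes toward the mapjoin conversion decision.

   public class OversubscriptionExample {
     public static void main(String[] args) {
       long noConditionalTaskSize = 256L * 1024 * 1024; // assumed 256 MB threshold
       double factor = 0.2;                             // oversubscribe factor default
       int donorExecutors = 3;                          // assumed per-query bound
       long borrowed = (long) (factor * noConditionalTaskSize) * donorExecutors;
       System.out.println("mapjoin budget ~= " + (noConditionalTaskSize + borrowed) + " bytes");
     }
   }
*/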
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(100000L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL", 1206, "hive.llap.mapjoin.memory.monitor.check.interval", v, "Check memory usage of mapjoin hash tables after every interval of this many rows. If map join hash table\nmemory usage exceeds (hive.auto.convert.join.noconditionaltask.size * hive.hash.table.inflation.factor)\nwhen running in LLAP, tasks will get killed and not retried. Set the value to 0 to disable this feature.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL> = v;
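/* Sketch of the kill condition described above (a paraphrase with hypothetical
   names, not Hive's code): hash table memory is sampled every N rows and compared
   against noconditionaltask.size * hive.hash.table.inflation.factor.

   public class MapJoinMemoryMonitorSketch {
     static final long CHECK_INTERVAL_ROWS = 100_000L; // this setting's default

     static boolean shouldKill(long rowsProcessed, long hashTableBytes,
                               long noConditionalTaskSize, double inflationFactor) {
       if (CHECK_INTERVAL_ROWS == 0 || rowsProcessed % CHECK_INTERVAL_ROWS != 0) {
         return false; // 0 disables the feature; otherwise only sample at the interval
       }
       return hashTableBytes > (long) (noConditionalTaskSize * inflationFactor);
     }
   }
*/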
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_AM_REPORTER_MAX_THREADS", 1207, "hive.llap.daemon.am-reporter.max.threads", v, "Maximum number of threads to be used for the AM reporter. If this is lower than the number of\nexecutors in the LLAP daemon, it will be set to the number of executors at runtime.", "llap.daemon.am-reporter.max.threads");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_AM_REPORTER_MAX_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_RPC_PORT", 1208, "hive.llap.daemon.rpc.port", v, "The LLAP daemon RPC port. A value of 0 indicates a dynamic port.", "llap.daemon.rpc.port");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_RPC_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4096);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_MEMORY_PER_INSTANCE_MB", 1209, "hive.llap.daemon.memory.per.instance.mb", v, "The total amount of memory to use for the executors inside LLAP (in megabytes).", "llap.daemon.memory.per.instance.mb");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_MEMORY_PER_INSTANCE_MB> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_XMX_HEADROOM", 1210, "hive.llap.daemon.xmx.headroom", "5%", "The total amount of heap memory set aside by LLAP and not used by the executors. Can\nbe specified as size (e.g. \'512Mb\'), or percentage (e.g. \'5%\'). Note that the latter is\nderived from the total daemon XMX, which can be different from the total executor\nmemory if the cache is on-heap; although that\'s not the default configuration.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_XMX_HEADROOM> = v;
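/* Sketch of the two headroom forms mentioned above; the parsing and names are
   hypothetical, only the rule that a percentage is taken from the daemon Xmx
   comes from the description.

   public class XmxHeadroomSketch {
     static long headroomBytes(String value, long daemonXmxBytes) {
       value = value.trim();
       if (value.endsWith("%")) {
         double pct = Double.parseDouble(value.substring(0, value.length() - 1));
         return (long) (daemonXmxBytes * pct / 100.0);
       }
       // Minimal absolute-size handling for the example; real parsing accepts more units.
       return Long.parseLong(value.toLowerCase().replace("mb", "")) * 1024 * 1024;
     }

     public static void main(String[] args) {
       long xmx = 4096L * 1024 * 1024;                  // assumed 4 GB daemon Xmx
       System.out.println(headroomBytes("5%", xmx));    // ~204 MB with the default
       System.out.println(headroomBytes("512Mb", xmx)); // 512 MB flat
     }
   }
*/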
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_VCPUS_PER_INSTANCE", 1211, "hive.llap.daemon.vcpus.per.instance", v, "The total number of vcpus to use for the executors inside LLAP.", "llap.daemon.vcpus.per.instance");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_VCPUS_PER_INSTANCE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_NUM_FILE_CLEANER_THREADS", 1212, "hive.llap.daemon.num.file.cleaner.threads", v, "Number of file cleaner threads in LLAP.", "llap.daemon.num.file.cleaner.threads");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_NUM_FILE_CLEANER_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_FILE_CLEANUP_DELAY_SECONDS", 1213, "hive.llap.file.cleanup.delay.seconds", "0s", v, "How long to delay before cleaning up query files in LLAP (in seconds, for debugging).", "llap.file.cleanup.delay-seconds");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_FILE_CLEANUP_DELAY_SECONDS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_SERVICE_HOSTS", 1214, "hive.llap.daemon.service.hosts", null, "Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,\nYARN registry is used.", "llap.daemon.service.hosts");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SERVICE_HOSTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_DAEMON_SERVICE_REFRESH_INTERVAL", 1215, "hive.llap.daemon.service.refresh.interval.sec", "60s", v, "LLAP YARN registry service list refresh delay, in seconds.", "llap.daemon.service.refresh.interval");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_SERVICE_REFRESH_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_COMMUNICATOR_NUM_THREADS", 1216, "hive.llap.daemon.communicator.num.threads", v, "Number of threads to use in LLAP task communicator in Tez AM.", "llap.daemon.communicator.num.threads");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_COMMUNICATOR_NUM_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_PLUGIN_CLIENT_NUM_THREADS", 1217, "hive.llap.plugin.client.num.threads", v, "Number of threads to use in LLAP task plugin client.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_PLUGIN_CLIENT_NUM_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS", 1218, "hive.llap.daemon.download.permanent.fns", v, "Whether LLAP daemon should localize the resources for permanent UDFs.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_TASK_SCHEDULER_AM_COLLECT_DAEMON_METRICS_MS", 1219, "hive.llap.task.scheduler.am.collect.daemon.metrics.ms", "0ms", v, "Collect LLAP daemon metrics in the AM at the given interval in milliseconds,\nso that the AM can use this information to make better scheduling decisions.\nIf set to 0, the feature is disabled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_COLLECT_DAEMON_METRICS_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TASK_SCHEDULER_AM_COLLECT_DAEMON_METRICS_LISTENER", 1220, "hive.llap.task.scheduler.am.collect.daemon.metrics.listener", "", "The listener which is called when new LLAP daemon statistics are received on the AM side.\nThe listener should implement the org.apache.hadoop.hive.llap.tezplugins.metrics.LlapMetricsListener interface.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_COLLECT_DAEMON_METRICS_LISTENER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_NODEHEALTHCHECKS_MINTASKS", 1221, "hive.llap.nodehealthchecks.mintasks", v, "Specifies the minimum number of tasks executed by a particular LLAP daemon before the health\nstatus of the node is examined.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MINTASKS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_NODEHEALTHCHECKS_MININTERVALDURATION", 1222, "hive.llap.nodehealthchecks.minintervalduration", "300s", v, "The minimum time that needs to elapse between two corrective actions taken as a result of identifying\nan unhealthy node. Even if additional nodes are considered to be unhealthy, no action is performed until\nthis time interval has passed since the last corrective action.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MININTERVALDURATION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.5F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_NODEHEALTHCHECKS_TASKTIMERATIO", 1223, "hive.llap.nodehealthchecks.tasktimeratio", v, "LLAP daemons are considered unhealthy if their average (map) task execution time is significantly larger\nthan the average task execution time of the other nodes. This value specifies the ratio of a node to the\nother nodes which is considered the threshold for unhealthy. A value of 1.5, for example, considers a node\nto be unhealthy if its average task execution time is 50% larger than the average of the other nodes.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_TASKTIMERATIO> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(2.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_NODEHEALTHCHECKS_EXECUTORRATIO", 1224, "hive.llap.nodehealthchecks.executorratio", v, "If an unhealthy node is identified, it is blacklisted only when there are enough free executors to execute\nits tasks. This value specifies the ratio of free executors compared to the blacklisted ones.\nA value of 2.0, for example, means that we blacklist an unhealthy node only if we have 2 times more\nfree executors on the remaining nodes than on the unhealthy node.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_EXECUTORRATIO> = v;
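/* Worked example combining the two health-check ratios above (illustrative
   numbers, hypothetical names): a node is unhealthy when its average task time
   exceeds tasktimeratio times the other nodes' average, and it may be blacklisted
   only when the remaining nodes hold executorratio times its executors as free capacity.

   public class NodeHealthCheckSketch {
     public static void main(String[] args) {
       double taskTimeRatio = 1.5, executorRatio = 2.0; // defaults above
       double nodeAvgMs = 3200, othersAvgMs = 2000;
       boolean unhealthy = nodeAvgMs > taskTimeRatio * othersAvgMs; // 3200 > 3000 -> true
       int nodeExecutors = 4, freeElsewhere = 10;
       boolean canBlacklist = freeElsewhere >= executorRatio * nodeExecutors; // 10 >= 8 -> true
       System.out.println("blacklist node: " + (unhealthy && canBlacklist));
     }
   }
*/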
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_NODEHEALTHCHECKS_MAXNODES", 1225, "hive.llap.nodehealthchecks.maxnodes", v, "The maximum number of blacklisted nodes. If there are at least this many blacklisted nodes,\nthe listener will not blacklist further nodes even if all the conditions are met.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_NODEHEALTHCHECKS_MAXNODES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME", 1226, "hive.llap.task.scheduler.am.registry", "llap", "AM registry name for LLAP task scheduler plugin to register with.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TASK_SCHEDULER_AM_REGISTRY_PRINCIPAL", 1227, "hive.llap.task.scheduler.am.registry.principal", "", "The name of the principal used to access ZK AM registry securely.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_PRINCIPAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TASK_SCHEDULER_AM_REGISTRY_KEYTAB_FILE", 1228, "hive.llap.task.scheduler.am.registry.keytab.file", "", "The path to the Kerberos keytab file used to access ZK AM registry securely.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_AM_REGISTRY_KEYTAB_FILE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS", 1229, "hive.llap.task.scheduler.node.reenable.min.timeout.ms", "200ms", v, "Minimum time after which a previously disabled node will be re-enabled for scheduling,\nin milliseconds. This may be modified by an exponential back-off if failures persist.", "llap.task.scheduler.node.re-enable.min.timeout.ms");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS", 1230, "hive.llap.task.scheduler.node.reenable.max.timeout.ms", "10000ms", v, "Maximum time after which a previously disabled node will be re-enabled for scheduling,\nin milliseconds. This may be modified by an exponential back-off if failures persist.", "llap.task.scheduler.node.re-enable.max.timeout.ms");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(1.5F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR", 1231, "hive.llap.task.scheduler.node.disable.backoff.factor", v, "Backoff factor on successive blacklists of a node due to some failures. Blacklist times\nstart at the min timeout and go up to the max timeout based on this backoff factor.", "llap.task.scheduler.node.disable.backoff.factor");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR> = v;
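/* Worked example of the re-enable backoff defined by the three settings above
   (my reading; the loop is illustrative): the disable timeout starts at the
   minimum, is multiplied by the backoff factor on each successive blacklisting,
   and is capped at the maximum.

   public class ReenableBackoffSketch {
     public static void main(String[] args) {
       long minMs = 200, maxMs = 10_000; // re-enable min/max timeout defaults
       double factor = 1.5;              // disable backoff factor default
       long timeout = minMs;
       for (int failure = 1; failure <= 12; failure++) {
         System.out.println("failure " + failure + ": node disabled for " + timeout + " ms");
         timeout = Math.min(maxMs, (long) (timeout * factor)); // 200, 300, 450, ... capped
       }
     }
   }
*/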
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TASK_SCHEDULER_PREEMPT_INDEPENDENT", 1232, "hive.llap.task.scheduler.preempt.independent", v, "Whether the AM LLAP scheduler should preempt a lower priority task for a higher priority one\neven if the former doesn\'t depend on the latter (e.g. for two parallel sides of a union).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_PREEMPT_INDEPENDENT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE", 1233, "hive.llap.task.scheduler.num.schedulable.tasks.per.node", v, "The number of tasks the AM TaskScheduler will try allocating per node. 0 indicates that\nthis should be picked up from the Registry. -1 indicates unlimited capacity; positive\nvalues indicate a specific bound.", "llap.task.scheduler.num.schedulable.tasks.per.node");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(-1L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(9223372036854775807L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit,java.lang.Long,boolean,java.lang.Long,boolean)>(v, v, 1, v, 1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_TASK_SCHEDULER_LOCALITY_DELAY", 1234, "hive.llap.task.scheduler.locality.delay", "0ms", v, "Amount of time to wait before allocating a request which contains location information to a location other than the ones requested. Set to -1 for an infinite delay, 0 for no delay.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_SCHEDULER_LOCALITY_DELAY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS", 1235, "hive.llap.daemon.task.preemption.metrics.intervals", "30,60,300", "Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n for percentile latency metrics. Used by LLAP daemon task scheduler metrics for\n time taken to kill task (due to preemption) and useful time wasted by the task that\n is about to be preempted.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE", 1236, "hive.llap.daemon.task.scheduler.wait.queue.size", v, "LLAP scheduler maximum queue size.", "llap.daemon.task.scheduler.wait.queue.size");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME", 1237, "hive.llap.daemon.wait.queue.comparator.class.name", "org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator", "The priority comparator to use for LLAP scheduler priority queue. The built-in options\nare org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator and\norg.apache.hadoop.hive.llap.daemon.impl.comparator.FirstInFirstOutComparator.", "llap.daemon.wait.queue.comparator.class.name");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION", 1238, "hive.llap.daemon.task.scheduler.enable.preemption", v, "Whether non-finishable running tasks (e.g. a reducer waiting for inputs) should be\npreempted by finishable tasks inside LLAP scheduler.", "llap.daemon.task.scheduler.enable.preemption");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_METRICS_TIMED_WINDOW_AVERAGE_DATA_POINTS", 1239, "hive.llap.daemon.metrics.timed.window.average.data.points", v, "The number of data points stored for calculating executor metrics timed averages.\nCurrently used for ExecutorNumExecutorsAvailableAverage and ExecutorNumQueuedRequestsAverage.\n0 means that average calculation is turned off.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_TIMED_WINDOW_AVERAGE_DATA_POINTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit NANOSECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_DAEMON_METRICS_TIMED_WINDOW_AVERAGE_WINDOW_LENGTH", 1240, "hive.llap.daemon.metrics.timed.window.average.window.length", "1m", v, "The length of the time window used for calculating executor metrics timed averages.\nCurrently used for ExecutorNumExecutorsAvailableAverage and ExecutorNumQueuedRequestsAverage\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_TIMED_WINDOW_AVERAGE_WINDOW_LENGTH> = v;
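/* Sketch of a timed-window average over a bounded number of data points, the
   mechanism the metrics settings in this group configure (hypothetical
   implementation, not Hive's): keep at most N timestamped samples, drop those
   older than the window, and average the rest.

   import java.util.ArrayDeque;

   public class TimedWindowAverage {
     private final ArrayDeque<long[]> samples = new ArrayDeque<>(); // {timestampMs, value}
     private final int maxDataPoints;
     private final long windowMs;

     TimedWindowAverage(int maxDataPoints, long windowMs) {
       this.maxDataPoints = maxDataPoints;
       this.windowMs = windowMs;
     }

     void add(long nowMs, long value) {
       samples.addLast(new long[] {nowMs, value});
       while (samples.size() > maxDataPoints) samples.removeFirst(); // bound the points
     }

     double average(long nowMs) {
       while (!samples.isEmpty() && samples.peekFirst()[0] < nowMs - windowMs) {
         samples.removeFirst(); // discard samples outside the time window
       }
       long sum = 0;
       for (long[] s : samples) sum += s[1];
       return samples.isEmpty() ? 0 : (double) sum / samples.size();
     }
   }
*/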
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_METRICS_SIMPLE_AVERAGE_DATA_POINTS", 1241, "hive.llap.daemon.metrics.simple.average.data.points", v, "The number of data points stored for calculating executor metrics simple averages.\nCurrently used for AverageQueueTime and AverageResponseTime.\n0 means that average calculation is turned off.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_METRICS_SIMPLE_AVERAGE_DATA_POINTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS", 1242, "hive.llap.task.communicator.connection.timeout.ms", "16000ms", v, "Connection timeout (in milliseconds) before a failure to an LLAP daemon from Tez AM.", "llap.task.communicator.connection.timeout-millis");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(30);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT", 1243, "hive.llap.task.communicator.listener.thread-count", v, "The number of task communicator listener threads.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(12);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_MAX_CONCURRENT_REQUESTS_PER_NODE", 1244, "hive.llap.max.concurrent.requests.per.daemon", v, "Maximum number of concurrent requests to one daemon from Tez AM");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_MAX_CONCURRENT_REQUESTS_PER_NODE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String,java.lang.String)>("LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS", 1245, "hive.llap.task.communicator.connection.sleep.between.retries.ms", "2000ms", v, "Sleep duration (in milliseconds) to wait before retrying on error when obtaining a\nconnection to LLAP daemon from Tez AM.", "llap.task.communicator.connection.sleep-between-retries-millis");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TASK_UMBILICAL_SERVER_PORT", 1246, "hive.llap.daemon.umbilical.port", "0", "LLAP task umbilical server RPC port or range of ports to try in case the first port is occupied");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_UMBILICAL_SERVER_PORT> = v;
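/* Sketch of the "port or range of ports" behavior the description above suggests
   (hypothetical parsing, not Hive's code): a single value binds that port
   (0 lets the OS pick), while "lo-hi" tries each port until one binds.

   import java.io.IOException;
   import java.net.ServerSocket;

   public class UmbilicalPortSketch {
     static ServerSocket bind(String spec) throws IOException {
       int dash = spec.indexOf('-');
       if (dash < 0) {
         return new ServerSocket(Integer.parseInt(spec)); // single port; 0 = any free port
       }
       int lo = Integer.parseInt(spec.substring(0, dash));
       int hi = Integer.parseInt(spec.substring(dash + 1));
       for (int p = lo; p <= hi; p++) {
         try {
           return new ServerSocket(p);
         } catch (IOException occupied) {
           // port taken; try the next one in the range
         }
       }
       throw new IOException("no free port in range " + spec);
     }
   }
*/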
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(15002);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_WEB_PORT", 1247, "hive.llap.daemon.web.port", v, "LLAP daemon web UI port.", "llap.daemon.service.port");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("LLAP_DAEMON_WEB_SSL", 1248, "hive.llap.daemon.web.ssl", v, "Whether LLAP daemon web UI should use SSL.", "llap.daemon.service.ssl");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_SSL> = v;
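// Reader's note: `Boolean.valueOf(boolean)` called with 0 or 1 is normal here; Jimple
// models booleans as ints, so `valueOf(0)` above is source-level `Boolean.valueOf(false)`.
// The same int-encoded pattern recurs for every boolean-typed default in this initializer.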
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_WEB_XFRAME_ENABLED", 1249, "hive.llap.daemon.web.xframe.enabled", v, "Whether to enable xframe on LLAP daemon webUI\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_XFRAME_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_WEB_XFRAME_VALUE", 1250, "hive.llap.daemon.web.xframe.value", "SAMEORIGIN", "Configuration to allow the user to set the x_frame-options value\n");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_WEB_XFRAME_VALUE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_CLIENT_CONSISTENT_SPLITS", 1251, "hive.llap.client.consistent.splits", v, "Whether to setup split locations to match nodes on which llap daemons are running, instead of using the locations provided by the split itself. If there is no llap daemon running, fall back to locations provided by the split. This is effective only if hive.execution.mode is llap");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_CLIENT_CONSISTENT_SPLITS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_SPLIT_LOCATION_PROVIDER_CLASS", 1252, "hive.llap.split.location.provider.class", "org.apache.hadoop.hive.ql.exec.tez.HostAffinitySplitLocationProvider", "Split location provider class to use during split generation for LLAP. This class should implement\norg.apache.hadoop.mapred.split.SplitLocationProvider interface");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_SPLIT_LOCATION_PROVIDER_CLASS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_VALIDATE_ACLS", 1253, "hive.llap.validate.acls", v, "Whether LLAP should reject permissive ACLs in some cases (e.g. its own management\nprotocol or ZK paths), similar to how ssh refuses a key with bad access permissions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_VALIDATE_ACLS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(15003);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_OUTPUT_SERVICE_PORT", 1254, "hive.llap.daemon.output.service.port", v, "LLAP daemon output service port");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT", 1255, "hive.llap.daemon.output.stream.timeout", "120s", v, "The timeout for the client to connect to LLAP output service and start the fragment\noutput after sending the fragment. The fragment will fail if its output is not claimed.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(131072);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE", 1256, "hive.llap.daemon.output.service.send.buffer.size", v, "Send buffer size to be used by LLAP daemon output service");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(8);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES", 1257, "hive.llap.daemon.output.service.max.pending.writes", v, "Maximum number of queued writes allowed per connection when sending data\n via the LLAP output service to external clients.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "default";
v[1] = "text";
v[2] = "orc";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_EXTERNAL_SPLITS_TEMP_TABLE_STORAGE_FORMAT", 1258, "hive.llap.external.splits.temp.table.storage.format", "orc", v, "Storage format for temp tables created using LLAP external client");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_SPLITS_TEMP_TABLE_STORAGE_FORMAT> = v;
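// Reader's note: the newarray/element-store sequence above is the decompiled shape of a
// varargs call, i.e. `new Validator.StringSet("default", "text", "orc")`. A hedged usage
// sketch, assuming HiveConf#verifyAndSet applies the registered validator on assignment:
//
//   conf.verifyAndSet("hive.llap.external.splits.temp.table.storage.format", "text"); // accepted
//   conf.verifyAndSet("hive.llap.external.splits.temp.table.storage.format", "csv");  // throws IllegalArgumentException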
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_EXTERNAL_CLIENT_USE_HYBRID_CALENDAR", 1259, "hive.llap.external.client.use.hybrid.calendar", v, "Whether to use hybrid calendar for parsing of data/timestamps.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_USE_HYBRID_CALENDAR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_EXTERNAL_CLIENT_CLOUD_DEPLOYMENT_SETUP_ENABLED", 1260, "hive.llap.external.client.cloud.deployment.setup.enabled", v, "Tells whether to enable additional RPC port, auth mechanism for llap external clients. This is meantfor cloud based deployments. When true, it has following effects - \n. Enables an extra RPC port on LLAP daemon to accept fragments from external clients. Seehive.llap.external.client.cloud.rpc.port\n. Uses external hostnames of LLAP in splits, so that clients can submit from outside of cloud. Env variable PUBLIC_HOSTNAME should be available on LLAP machines.\n. Uses JWT based authentication for splits to be validated at LLAP. See hive.llap.external.client.cloud.jwt.shared.secret.provider");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_DEPLOYMENT_SETUP_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(30004);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_EXTERNAL_CLIENT_CLOUD_RPC_PORT", 1261, "hive.llap.external.client.cloud.rpc.port", v, "The LLAP daemon RPC port for external clients when llap is running in cloud environment.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_RPC_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(30005);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_EXTERNAL_CLIENT_CLOUD_OUTPUT_SERVICE_PORT", 1262, "hive.llap.external.client.cloud.output.service.port", v, "LLAP output service port when llap is running in cloud environment");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_OUTPUT_SERVICE_PORT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_EXTERNAL_CLIENT_CLOUD_JWT_SHARED_SECRET_PROVIDER", 1263, "hive.llap.external.client.cloud.jwt.shared.secret.provider", "org.apache.hadoop.hive.llap.security.DefaultJwtSharedSecretProvider", "Shared secret provider to be used to sign JWT");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_JWT_SHARED_SECRET_PROVIDER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_EXTERNAL_CLIENT_CLOUD_JWT_SHARED_SECRET", 1264, "hive.llap.external.client.cloud.jwt.shared.secret", "", "The LLAP daemon RPC port for external clients when llap is running in cloud environment. Length of the secret should be >= 32 bytes");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_EXTERNAL_CLIENT_CLOUD_JWT_SHARED_SECRET> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_ENABLE_GRACE_JOIN_IN_LLAP", 1265, "hive.llap.enable.grace.join.in.llap", v, "Override if grace join should be allowed to run in llap.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_ENABLE_GRACE_JOIN_IN_LLAP> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_HS2_ENABLE_COORDINATOR", 1266, "hive.llap.hs.coordinator.enabled", v, "Whether to create the LLAP coordinator; since execution engine and container vs llap\nsettings are both coming from job configs, we don\'t know at start whether this should\nbe created. Default true.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_HS2_ENABLE_COORDINATOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "query-routing";
v[1] = "RFA";
v[2] = "console";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("LLAP_DAEMON_LOGGER", 1267, "hive.llap.daemon.logger", "query-routing", v, "logger used for llap-daemons.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_DAEMON_LOGGER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_OUTPUT_FORMAT_ARROW", 1268, "hive.llap.output.format.arrow", v, "Whether LLapOutputFormatService should output arrow batches");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_OUTPUT_FORMAT_ARROW> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_COLLECT_LOCK_METRICS", 1269, "hive.llap.lockmetrics.collect", v, "Whether lock metrics (wait times, counts) are collected for LLAP related locks");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_COLLECT_LOCK_METRICS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("LLAP_TASK_TIME_SUMMARY", 1270, "hive.llap.task.time.print.summary", v, "Display queue and runtime of tasks by host for every query executed by the shell.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars LLAP_TASK_TIME_SUMMARY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit MILLISECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_TRIGGER_VALIDATION_INTERVAL", 1271, "hive.trigger.validation.interval", "500ms", v, "Interval for validating triggers during execution of a query. Triggers defined in resource plan will get\nvalidated for all SQL operations after every defined interval (default: 500ms) and corresponding action\ndefined in the trigger will be taken");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TRIGGER_VALIDATION_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("NWAYJOINREORDER", 1272, "hive.reorder.nway.joins", v, "Runs reordering of tables within single n-way join (i.e.: picks streamtable)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars NWAYJOINREORDER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MERGE_NWAY_JOINS", 1273, "hive.merge.nway.joins", v, "Merge adjacent joins into a single n-way join");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MERGE_NWAY_JOINS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, null);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_LOG_N_RECORDS", 1274, "hive.log.every.n.records", v, v, "If value is greater than 0 logs in fixed intervals of size n rather than exponentially.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_N_RECORDS> = v;
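// Reader's note: this validator is constructed as `new RangeValidator(0L, null)`; judging
// by the null second argument, the upper bound is open, so any value >= 0 should pass
// validation and negative values should fail (interpretation of the pattern, not verified
// against the Validator source).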
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "throw";
v[1] = "skip";
v[2] = "ignore";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_MSCK_PATH_VALIDATION", 1275, "hive.msck.path.validation", "throw", v, "The approach msck should take with HDFS directories that are partition-like but contain unsupported characters. \'throw\' (an exception) is the default; \'skip\' will skip the invalid directories and still repair the others; \'ignore\' will skip the validation (legacy behavior, causes bugs in many cases)");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_PATH_VALIDATION> = v;
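// Hedged usage sketch for the "throw" | "skip" | "ignore" StringSet above, using the
// static HiveConf.setVar helper (assumed to route through the same validation):
//
//   // make msck skip partition-like directories with unsupported characters
//   HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_MSCK_PATH_VALIDATION, "skip");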
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(3000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MSCK_REPAIR_BATCH_SIZE", 1276, "hive.msck.repair.batch.size", v, "Batch size for the msck repair command. If the value is greater than zero,\n it will execute batch wise with the configured batch size. In case of errors while\nadding unknown partitions the batch size is automatically reduced by half in the subsequent\nretry attempt. The default value is 3000 which means it will execute in the batches of 3000.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_REPAIR_BATCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES", 1277, "hive.msck.repair.batch.max.retries", v, "Maximum number of retries for the msck repair command when adding unknown partitions.\n If the value is greater than zero it will retry adding unknown partitions until the maximum\nnumber of attempts is reached or batch size is reduced to 0, whichever is earlier.\nIn each retry attempt it will reduce the batch size by a factor of 2 until it reaches zero.\nIf the value is set to zero it will retry until the batch size becomes zero as described above.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_LLAP_CONCURRENT_QUERIES", 1278, "hive.server.llap.concurrent.queries", v, "The number of queries allowed in parallel via llap. Negative number implies \'infinite\'.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_LLAP_CONCURRENT_QUERIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_TEZ_ENABLE_MEMORY_MANAGER", 1279, "hive.tez.enable.memory.manager", v, "Enable memory manager for tez");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TEZ_ENABLE_MEMORY_MANAGER> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Float: java.lang.Float valueOf(float)>(2.0F);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_HASH_TABLE_INFLATION_FACTOR", 1280, "hive.hash.table.inflation.factor", v, "Expected inflation factor between disk/in memory representation of hash tables");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_HASH_TABLE_INFLATION_FACTOR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_LOG_TRACE_ID", 1281, "hive.log.trace.id", "", "Log tracing id that can be used by upstream clients for tracking respective logs. Truncated to 64 characters. Defaults to use auto-generated session id.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_LOG_TRACE_ID> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_MM_AVOID_GLOBSTATUS_ON_S3", 1282, "hive.mm.avoid.s.globstatus", v, "Whether to use listFiles (optimized on S3) instead of globStatus when on S3.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_MM_AVOID_GLOBSTATUS_ON_S3> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CONF_RESTRICTED_LIST", 1283, "hive.conf.restricted.list", "hive.security.authenticator.manager,hive.security.authorization.manager,hive.security.metastore.authorization.manager,hive.security.metastore.authenticator.manager,hive.users.in.admin.role,hive.server.xsrf.filter.enabled,hive.security.authorization.enabled,hive.distcp.privileged.doAs,hive.server.authentication.ldap.baseDN,hive.server.authentication.ldap.url,hive.server.authentication.ldap.Domain,hive.server.authentication.ldap.groupDNPattern,hive.server.authentication.ldap.groupFilter,hive.server.authentication.ldap.userDNPattern,hive.server.authentication.ldap.userFilter,hive.server.authentication.ldap.groupMembershipKey,hive.server.authentication.ldap.userMembershipKey,hive.server.authentication.ldap.groupClassKey,hive.server.authentication.ldap.customLDAPQuery,hive.server.service.users,hive.server.graceful.stop.timeout,hive.privilege.synchronizer,hive.privilege.synchronizer.interval,hive.query.max.length,hive.druid.broker.address.default,hive.druid.coordinator.address.default,hikaricp.,hadoop.bin.path,yarn.bin.path,hive.driver.parallel.compilation.global.limit,hive.zookeeper.ssl.keystore.location,hive.zookeeper.ssl.keystore.password,hive.zookeeper.ssl.truststore.location,hive.zookeeper.ssl.truststore.password", "Comma separated list of configuration options which are immutable at runtime");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_RESTRICTED_LIST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars METASTOREPWD>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_SSL_KEYSTORE_PASSWORD>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars DRUID_METADATA_DB_PASSWORD>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
v = dynamicinvoke "makeConcatWithConstants" <java.lang.String (java.lang.String,java.lang.String,java.lang.String,java.lang.String)>(v, v, v, v) <java.lang.invoke.StringConcatFactory: java.lang.invoke.CallSite makeConcatWithConstants(java.lang.invoke.MethodHandles$Lookup,java.lang.String,java.lang.invoke.MethodType,java.lang.String,java.lang.Object[])>("\u,\u,\u,\u,fs.s.awsAccessKeyId,fs.s.awsSecretAccessKey,fs.s3n.awsAccessKeyId,fs.s3n.awsSecretAccessKey,fs.s3a.access.key,fs.s3a.secret.key,fs.s3a.proxy.password,dfs.adls.oauth.credential,fs.adl.oauth.credential,fs.azure.account.oauth.client.secret,hive.zookeeper.ssl.keystore.location,hive.zookeeper.ssl.keystore.password,hive.zookeeper.ssl.truststore.location,hive.zookeeper.ssl.truststore.password");
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CONF_HIDDEN_LIST", 1284, "hive.conf.hidden.list", v, "Comma separated list of configuration options which should not be read by normal user like passwords");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_HIDDEN_LIST> = v;
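// Reader's note: the dynamicinvoke above is the invokedynamic form of Java string
// concatenation (StringConcatFactory.makeConcatWithConstants); each \u0001 in the recipe
// string marks where one dynamic argument is spliced in. De-sugared Java equivalent
// (sketch; the constant tail is abbreviated):
//
//   String hiddenList = METASTOREPWD.varname + ","
//       + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + ","
//       + HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD.varname + ","
//       + DRUID_METADATA_DB_PASSWORD.varname
//       + ",fs.s3.awsAccessKeyId,fs.s3.awsSecretAccessKey,...";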
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_CONF_INTERNAL_VARIABLE_LIST", 1285, "hive.conf.internal.variable.list", "hive.added.files.path,hive.added.jars.path,hive.added.archives.path", "Comma separated list of variables which are used internally and should not be configurable.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_CONF_INTERNAL_VARIABLE_LIST> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>()>();
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_QUERY_MAX_LENGTH", 1286, "hive.query.max.length", "10Mb", v, "The maximum size of a query string. Enforced after variable substitutions.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_LENGTH> = v;
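// Hedged sketch: SizeValidator-backed vars take human-readable sizes, so the "10Mb"
// default above resolves to 10 * 1024 * 1024 = 10485760 bytes. Assuming a
// HiveConf#getSizeVar accessor exists for size-typed vars:
//
//   long maxQueryLen = conf.getSizeVar(HiveConf.ConfVars.HIVE_QUERY_MAX_LENGTH); // 10485760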
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_QUERY_TIMEOUT_SECONDS", 1287, "hive.query.timeout.seconds", "0s", v, "Timeout for Running Query in seconds. A nonpositive value means infinite. If the query timeout is also set by thrift API call, the smaller one will be taken.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_TIMEOUT_SECONDS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
v = <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS>;
v = v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: java.lang.String varname>;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String,java.lang.String)>("HIVE_COMPUTE_SPLITS_NUM_THREADS", 1288, "hive.compute.splits.num.threads", v, "How many threads Input Format should use to create splits in parallel.", v);
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_COMPUTE_SPLITS_NUM_THREADS> = v;
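// Reader's note: this seven-argument constructor variant carries a trailing alternate key,
// read here from HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS.varname; so
// hive.orc.compute.splits.num.threads appears to serve as the legacy alias of
// hive.compute.splits.num.threads (interpretation based on the constructor shape used
// throughout this dump).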
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
v = new org.apache.hadoop.hive.conf.Validator$SizeValidator;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(0L);
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(1024L);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$SizeValidator: void <init>(java.lang.Long,boolean,java.lang.Long,boolean)>(v, 1, v, 1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_EXEC_INPUT_LISTING_MAX_THREADS", 1289, "hive.exec.input.listing.max.threads", v, v, "Maximum number of threads that Hive uses to list file information from file systems (recommended > 1 for blobstore).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_EXEC_INPUT_LISTING_MAX_THREADS> = v;
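// Reader's note: with Jimple's int-encoded booleans decoded, the validator above is
// `new SizeValidator(0L, true, 1024L, true)`, i.e. the thread count must fall in the
// inclusive range [0, 1024].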
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_REEXECUTION_ENABLED", 1290, "hive.query.reexecution.enabled", v, "Enable query reexecutions");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_REEXECUTION_STRATEGIES", 1291, "hive.query.reexecution.strategies", "overlay,reoptimize,reexecute_lost_am,dagsubmit,recompile_without_cbo", "comma separated list of plugin can be used:\n  overlay: hiveconf subtree \'reexec.overlay\' is used as an overlay in case of an execution errors out\n  reoptimize: collects operator statistics during execution and recompile the query after a failure\n  recompile_without_cbo: recompiles query after a CBO failure\n  reexecute_lost_am: reexecutes query if it failed due to tez am node gets decommissioned");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STRATEGIES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$StringSet;
v = newarray (java.lang.String)[3];
v[0] = "query";
v[1] = "hiveserver";
v[2] = "metastore";
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$StringSet: void <init>(java.lang.String[])>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_QUERY_REEXECUTION_STATS_PERSISTENCE", 1292, "hive.query.reexecution.stats.persist.scope", "metastore", v, "Sets the persistence scope of runtime statistics\n  query: runtime statistics are only used during re-execution\n  hiveserver: runtime statistics are persisted in the hiveserver - all sessions share it\n  metastore: runtime statistics are persisted in the metastore as well");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_PERSISTENCE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(5);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(20);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_TXN_MAX_RETRYSNAPSHOT_COUNT", 1293, "hive.txn.retrysnapshot.max.count", v, v, "Maximum number of snapshot invalidate attempts per request.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_TXN_MAX_RETRYSNAPSHOT_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_MAX_REEXECUTION_COUNT", 1294, "hive.query.reexecution.max.count", v, "Maximum number of re-executions for a single query.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_REEXECUTION_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS", 1295, "hive.query.reexecution.always.collect.operator.stats", v, "If sessionstats are enabled; this option can be used to collect statistics all the time");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = (int) -1;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_REEXECUTION_STATS_CACHE_BATCH_SIZE", 1296, "hive.query.reexecution.stats.cache.batch.size", v, "If runtime stats are stored in metastore; the maximal batch size per round during load.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_CACHE_BATCH_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(100000);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_REEXECUTION_STATS_CACHE_SIZE", 1297, "hive.query.reexecution.stats.cache.size", v, "Size of the runtime statistics cache. Unit is: OperatorStat entry; a query plan consist ~100.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_REEXECUTION_STATS_CACHE_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_PLANMAPPER_LINK_RELNODES", 1298, "hive.query.planmapper.link.relnodes", v, "Whether to link Calcite nodes to runtime statistics.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_PLANMAPPER_LINK_RELNODES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_MAX_RECOMPILATION_COUNT", 1299, "hive.query.recompilation.max.count", v, "Maximum number of re-compilations for a single query.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_MAX_RECOMPILATION_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SCHEDULED_QUERIES_EXECUTOR_ENABLED", 1300, "hive.scheduled.queries.executor.enabled", v, "Controls whether HS2 will run scheduled query executor.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SCHEDULED_QUERIES_NAMESPACE", 1301, "hive.scheduled.queries.namespace", "hive", "Sets the scheduled query namespace to be used. New scheduled queries are created in this namespace;and execution is also bound to the namespace");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_NAMESPACE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SCHEDULED_QUERIES_EXECUTOR_IDLE_SLEEP_TIME", 1302, "hive.scheduled.queries.executor.idle.sleep.time", "60s", v, "Time to sleep between querying for the presence of a scheduled query.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_IDLE_SLEEP_TIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SCHEDULED_QUERIES_EXECUTOR_PROGRESS_REPORT_INTERVAL", 1303, "hive.scheduled.queries.executor.progress.report.interval", "60s", v, "While scheduled queries are in flight; a background update happens periodically to report the actual state of the query");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_EXECUTOR_PROGRESS_REPORT_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SCHEDULED_QUERIES_CREATE_AS_ENABLED", 1304, "hive.scheduled.queries.create.as.enabled", v, "This option sets the default behaviour of newly created scheduled queries.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_CREATE_AS_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SECURITY_AUTHORIZATION_SCHEDULED_QUERIES_SUPPORTED", 1305, "hive.security.authorization.scheduled.queries.supported", v, "Enable this if the configured authorizer is able to handle scheduled query related calls.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SECURITY_AUTHORIZATION_SCHEDULED_QUERIES_SUPPORTED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(4);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, null);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_SCHEDULED_QUERIES_MAX_EXECUTORS", 1306, "hive.scheduled.queries.max.executors", v, v, "Maximal number of scheduled query executors to allow.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SCHEDULED_QUERIES_MAX_EXECUTORS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, null);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_ASYNC_CLEANUP_SERVICE_THREAD_COUNT", 1307, "hive.async.cleanup.service.thread.count", v, v, "Number of threads that run some eventual cleanup operations after queries/sessions close. 0 means cleanup is sync.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_CLEANUP_SERVICE_THREAD_COUNT> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10000);
v = new org.apache.hadoop.hive.conf.Validator$RangeValidator;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(2147483647);
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$RangeValidator: void <init>(java.lang.Object,java.lang.Object)>(v, v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_ASYNC_CLEANUP_SERVICE_QUEUE_SIZE", 1308, "hive.async.cleanup.service.queue.size", v, v, "Size of the async cleanup queue. If cleanup queue is full, cleanup operations become synchronous. Applicable only when number of async cleanup is turned on.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ASYNC_CLEANUP_SERVICE_QUEUE_SIZE> = v;
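// Reader's note: 2147483647 is Integer.MAX_VALUE, so this RangeValidator effectively
// enforces only the lower bound: any queue size >= 10 is accepted.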
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_RESULTS_CACHE_ENABLED", 1309, "hive.query.results.cache.enabled", v, "If the query results cache is enabled. This will keep results of previously executed queries to be reused if the same query is executed again.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_RESULTS_CACHE_NONTRANSACTIONAL_TABLES_ENABLED", 1310, "hive.query.results.cache.nontransactional.tables.enabled", v, "If the query results cache is enabled for queries involving non-transactional tables.Users who enable this setting should be willing to tolerate some amount of stale results in the cache.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_NONTRANSACTIONAL_TABLES_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS", 1311, "hive.query.results.cache.wait.for.pending.results", v, "Should a query wait for the pending results of an already running query, in order to use the cached result when it becomes ready");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_RESULTS_CACHE_DIRECTORY", 1312, "hive.query.results.cache.directory", "/tmp/hive/_resultscache_", "Location of the query results cache directory. Temporary results from queries will be moved to this location.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_DIRECTORY> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_LIFETIME", 1313, "hive.query.results.cache.max.entry.lifetime", "3600s", v, "Maximum lifetime in seconds for an entry in the query results cache. A nonpositive value means infinite.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_LIFETIME> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(2147483648L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_RESULTS_CACHE_MAX_SIZE", 1314, "hive.query.results.cache.max.size", v, "Maximum total size in bytes that the query results cache directory is allowed to use on the filesystem.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_SIZE> = v;
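// Worked default: 2147483648 bytes = 2 GiB for the whole results-cache directory; the
// per-entry cap registered just below (10485760 bytes) is 10 MiB.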
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Long: java.lang.Long valueOf(long)>(10485760L);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_SIZE", 1315, "hive.query.results.cache.max.entry.size", v, "Maximum size in bytes that a single query result is allowed to use in the results cache directory");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_SIZE> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = new org.apache.hadoop.hive.conf.Validator$TimeValidator;
v = <java.util.concurrent.TimeUnit: java.util.concurrent.TimeUnit SECONDS>;
specialinvoke v.<org.apache.hadoop.hive.conf.Validator$TimeValidator: void <init>(java.util.concurrent.TimeUnit)>(v);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,org.apache.hadoop.hive.conf.Validator,java.lang.String)>("HIVE_NOTFICATION_EVENT_POLL_INTERVAL", 1316, "hive.notification.event.poll.interval", "60s", v, "How often the notification log is polled for new NotificationEvents from the metastore.A nonpositive value means the notification log is never polled.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NOTFICATION_EVENT_POLL_INTERVAL> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_NOTFICATION_EVENT_CONSUMERS", 1317, "hive.notification.event.consumers", "org.apache.hadoop.hive.ql.cache.results.QueryResultsCache$InvalidationEventConsumer", "Comma-separated list of class names extending EventConsumer,to handle the NotificationEvents retrieved by the notification event poll.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_NOTFICATION_EVENT_CONSUMERS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_DESCRIBE_PARTITIONED_TABLE_IGNORE_STATS", 1318, "hive.describe.partitionedtable.ignore.stats", v, "Disable partitioned table stats collection for \'DESCRIBE FORMATTED\' or \'DESCRIBE EXTENDED\' commands.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_DESCRIBE_PARTITIONED_TABLE_IGNORE_STATS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Integer: java.lang.Integer valueOf(int)>(10);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_SERVER2_ICEBERG_METADATA_GENERATOR_THREADS", 1319, "hive.server.iceberg.metadata.generator.threads", v, "Number of threads used to scan partition directories for data files and update/generate iceberg metadata");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_SERVER2_ICEBERG_METADATA_GENERATOR_THREADS> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_BLOBSTORE_SUPPORTED_SCHEMES", 1320, "hive.blobstore.supported.schemes", "s,s3a,s3n", "Comma-separated list of supported blobstore schemes.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_SUPPORTED_SCHEMES> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(0);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_BLOBSTORE_USE_BLOBSTORE_AS_SCRATCHDIR", 1321, "hive.blobstore.use.blobstore.as.scratchdir", v, "Enable the use of scratch directories directly on blob storage systems (it may cause performance penalties).");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_USE_BLOBSTORE_AS_SCRATCHDIR> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
v = staticinvoke <java.lang.Boolean: java.lang.Boolean valueOf(boolean)>(1);
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED", 1322, "hive.blobstore.optimizations.enabled", v, "This parameter enables a number of optimizations when running on blobstores:\n(1) If hive.blobstore.use.blobstore.as.scratchdir is false, force the last Hive job to write to the blobstore.\nThis is a performance optimization that forces the final FileSinkOperator to write to the blobstore.\nSee HIVE-15121 for details.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED> = v;
v = new org.apache.hadoop.hive.conf.HiveConf$ConfVars;
specialinvoke v.<org.apache.hadoop.hive.conf.HiveConf$ConfVars: void <init>(java.lang.String,int,java.lang.String,java.lang.Object,java.lang.String)>("HIVE_ADDITIONAL_CONFIG_FILES", 1323, "hive.additional.config.files", "", "The names of additional config files, such as ldap-site.xml,tez-site.xml, etc in comma separated list.");
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars HIVE_ADDITIONAL_CONFIG_FILES> = v;
v = staticinvoke <org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars[] $values()>();
<org.apache.hadoop.hive.conf.HiveConf$ConfVars: org.apache.hadoop.hive.conf.HiveConf$ConfVars[] $VALUES> = v;
return;
}
}