Skip to content
项目
群组
代码片段
帮助
正在加载...
帮助
提交反馈
为 GitLab 提交贡献
登录
切换导航
S
study-report
项目
项目
详情
动态
版本
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
统计图
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
统计图
提交
打开侧边栏
zhenxin.ma
study-report
提交
86828c95
提交
86828c95
编写于
12月 23, 2019
作者:
zhenxin.ma
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
生产集群配置文件
上级
c3681b0e
变更
3
隐藏空白字符变更
内嵌
并排
正在显示
3 个修改的文件
包含
499 行增加
和
0 行删除
+499
-0
core-site.xml
src/main/resources/core-site.xml
+137
-0
hdfs-site.xml
src/main/resources/hdfs-site.xml
+101
-0
hive-site.xml
src/main/resources/hive-site.xml
+261
-0
未找到文件。
src/main/resources/core-site.xml
0 → 100644
浏览文件 @
86828c95
<?xml version="1.0" encoding="UTF-8"?>
<!--Autogenerated by Cloudera Manager-->
<!--
  core-site.xml: Hadoop core client configuration for the production cluster.
  Property text is kept on the same line as its tags on purpose: XML text
  content is whitespace-significant and Hadoop's Configuration.get() does not
  trim, so indented multi-line values would leak newlines/spaces into the
  property value for non-trimming consumers.
-->
<configuration>
  <!-- Default filesystem: HA nameservice (logical name, resolved in hdfs-site.xml). -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://bi-name1</value>
  </property>
  <!-- Trash retention in minutes before permanent deletion. -->
  <property>
    <name>fs.trash.interval</name>
    <value>1</value>
  </property>
  <property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.Lz4Codec</value>
  </property>
  <!-- Security: simple (no Kerberos), authorization disabled. -->
  <property>
    <name>hadoop.security.authentication</name>
    <value>simple</value>
  </property>
  <property>
    <name>hadoop.security.authorization</name>
    <value>false</value>
  </property>
  <property>
    <name>hadoop.rpc.protection</name>
    <value>authentication</value>
  </property>
  <property>
    <name>hadoop.security.auth_to_local</name>
    <value>DEFAULT</value>
  </property>
  <!-- Proxy-user (impersonation) grants; '*' allows all hosts/groups per service. -->
  <property>
    <name>hadoop.proxyuser.oozie.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.oozie.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.flume.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.flume.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.HTTP.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.HTTP.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hive.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hive.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hue.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hue.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.httpfs.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.httpfs.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hdfs.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hdfs.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.yarn.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.yarn.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.security.group.mapping</name>
    <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
  </property>
  <property>
    <name>hadoop.security.instrumentation.requires.admin</name>
    <value>false</value>
  </property>
  <!-- Rack-awareness topology script (managed by Cloudera Manager). -->
  <property>
    <name>net.topology.script.file.name</name>
    <value>/etc/hadoop/conf.cloudera.yarn/topology.py</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>65536</value>
  </property>
  <!-- SSL is disabled cluster-wide; keystore settings are final (not overridable). -->
  <property>
    <name>hadoop.ssl.enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>hadoop.ssl.require.client.cert</name>
    <value>false</value>
    <final>true</final>
  </property>
  <property>
    <name>hadoop.ssl.keystores.factory.class</name>
    <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
    <final>true</final>
  </property>
  <property>
    <name>hadoop.ssl.server.conf</name>
    <value>ssl-server.xml</value>
    <final>true</final>
  </property>
  <property>
    <name>hadoop.ssl.client.conf</name>
    <value>ssl-client.xml</value>
    <final>true</final>
  </property>
</configuration>
src/main/resources/hdfs-site.xml
0 → 100644
浏览文件 @
86828c95
<?xml version="1.0" encoding="UTF-8"?>
<!--Autogenerated by Cloudera Manager-->
<!--
  hdfs-site.xml: HDFS client configuration for the HA nameservice "bi-name1".
  Property text is kept on the same line as its tags: XML text content is
  whitespace-significant and Hadoop's Configuration.get() does not trim, so
  indented multi-line values would otherwise carry stray whitespace.
-->
<configuration>
  <!-- HA nameservice with ZKFC automatic failover over the listed ZK quorum. -->
  <property>
    <name>dfs.nameservices</name>
    <value>bi-name1</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.bi-name1</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled.bi-name1</name>
    <value>true</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>hdfs127:2181,hdfs128:2181,hdfs129:2181</value>
  </property>
  <!-- Two NameNodes: namenode127 on hdfs127, namenode153 on hdfs130. -->
  <property>
    <name>dfs.ha.namenodes.bi-name1</name>
    <value>namenode127,namenode153</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.bi-name1.namenode127</name>
    <value>hdfs127:8020</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.bi-name1.namenode127</name>
    <value>hdfs127:8022</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.bi-name1.namenode127</name>
    <value>hdfs127:9870</value>
  </property>
  <property>
    <name>dfs.namenode.https-address.bi-name1.namenode127</name>
    <value>hdfs127:9871</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.bi-name1.namenode153</name>
    <value>hdfs130:8020</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.bi-name1.namenode153</name>
    <value>hdfs130:8022</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.bi-name1.namenode153</name>
    <value>hdfs130:9870</value>
  </property>
  <property>
    <name>dfs.namenode.https-address.bi-name1.namenode153</name>
    <value>hdfs130:9871</value>
  </property>
  <!-- Block/replication defaults: 3 replicas, 128 MiB blocks. -->
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
  </property>
  <property>
    <name>dfs.client.use.datanode.hostname</name>
    <value>false</value>
  </property>
  <property>
    <name>fs.permissions.umask-mode</name>
    <value>022</value>
  </property>
  <property>
    <name>dfs.client.block.write.locateFollowingBlock.retries</name>
    <value>7</value>
  </property>
  <property>
    <name>dfs.namenode.acls.enabled</name>
    <value>false</value>
  </property>
  <!-- Short-circuit local reads disabled; socket path retained for when enabled. -->
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/hdfs-sockets/dn</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.client.domain.socket.data.traffic</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
    <value>true</value>
  </property>
</configuration>
src/main/resources/hive-site.xml
0 → 100644
浏览文件 @
86828c95
<?xml version="1.0" encoding="UTF-8"?>
<!--Autogenerated by Cloudera Manager-->
<!--
  hive-site.xml: Hive client/HiveServer2 configuration for the production
  cluster. Property text is kept on the same line as its tags: XML text
  content is whitespace-significant and Hadoop's Configuration.get() does not
  trim, so indented multi-line values would otherwise carry stray whitespace.
-->
<configuration>
  <!-- Metastore connection. -->
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://hdfs127:9083</value>
  </property>
  <property>
    <name>hive.metastore.client.socket.timeout</name>
    <value>300</value>
  </property>
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/user/hive/warehouse</value>
  </property>
  <property>
    <name>hive.warehouse.subdir.inherit.perms</name>
    <value>true</value>
  </property>
  <!-- Join / reducer tuning. -->
  <property>
    <name>hive.auto.convert.join</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.auto.convert.join.noconditionaltask.size</name>
    <value>20971520</value>
  </property>
  <property>
    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.smbjoin.cache.rows</name>
    <value>10000</value>
  </property>
  <property>
    <name>hive.server2.logging.operation.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.logging.operation.log.location</name>
    <value>/var/log/hive/operation_logs</value>
  </property>
  <property>
    <name>mapred.reduce.tasks</name>
    <value>-1</value>
  </property>
  <property>
    <name>hive.exec.reducers.bytes.per.reducer</name>
    <value>67108864</value>
  </property>
  <property>
    <name>hive.exec.copyfile.maxsize</name>
    <value>33554432</value>
  </property>
  <property>
    <name>hive.exec.reducers.max</name>
    <value>1099</value>
  </property>
  <!-- Vectorized execution. -->
  <property>
    <name>hive.vectorized.groupby.checkinterval</name>
    <value>4096</value>
  </property>
  <property>
    <name>hive.vectorized.groupby.flush.percent</name>
    <value>0.1</value>
  </property>
  <property>
    <name>hive.compute.query.using.stats</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.vectorized.execution.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.vectorized.execution.reduce.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.vectorized.use.vectorized.input.format</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.vectorized.use.checked.expressions</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.vectorized.use.vector.serde.deserialize</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.vectorized.adaptor.usage.mode</name>
    <value>chosen</value>
  </property>
  <property>
    <name>hive.vectorized.input.format.excludes</name>
    <value>org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat</value>
  </property>
  <!-- Small-file merging. -->
  <property>
    <name>hive.merge.mapfiles</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.merge.mapredfiles</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.cbo.enable</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.fetch.task.conversion</name>
    <value>minimal</value>
  </property>
  <property>
    <name>hive.fetch.task.conversion.threshold</name>
    <value>268435456</value>
  </property>
  <property>
    <name>hive.limit.pushdown.memory.usage</name>
    <value>0.1</value>
  </property>
  <property>
    <name>hive.merge.sparkfiles</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.merge.smallfiles.avgsize</name>
    <value>16777216</value>
  </property>
  <property>
    <name>hive.merge.size.per.task</name>
    <value>268435456</value>
  </property>
  <property>
    <name>hive.optimize.reducededuplication</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.optimize.reducededuplication.min.reducer</name>
    <value>4</value>
  </property>
  <property>
    <name>hive.map.aggr</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.map.aggr.hash.percentmemory</name>
    <value>0.5</value>
  </property>
  <property>
    <name>hive.optimize.sort.dynamic.partition</name>
    <value>false</value>
  </property>
  <!-- Execution engine is MapReduce; Spark settings below apply to Hive-on-Spark. -->
  <property>
    <name>hive.execution.engine</name>
    <value>mr</value>
  </property>
  <property>
    <name>spark.executor.memory</name>
    <value>3255880908b</value>
  </property>
  <property>
    <name>spark.driver.memory</name>
    <value>966367641b</value>
  </property>
  <property>
    <name>spark.executor.cores</name>
    <value>4</value>
  </property>
  <property>
    <name>spark.yarn.driver.memoryOverhead</name>
    <value>102m</value>
  </property>
  <property>
    <name>spark.yarn.executor.memoryOverhead</name>
    <value>547m</value>
  </property>
  <property>
    <name>spark.dynamicAllocation.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>spark.dynamicAllocation.initialExecutors</name>
    <value>1</value>
  </property>
  <property>
    <name>spark.dynamicAllocation.minExecutors</name>
    <value>1</value>
  </property>
  <property>
    <name>spark.dynamicAllocation.maxExecutors</name>
    <value>2147483647</value>
  </property>
  <property>
    <name>hive.metastore.execute.setugi</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.support.concurrency</name>
    <value>true</value>
  </property>
  <!-- ZooKeeper quorums for Hive locking and HBase integration. -->
  <property>
    <name>hive.zookeeper.quorum</name>
    <value>hdfs127,hdfs128,hdfs129</value>
  </property>
  <property>
    <name>hive.zookeeper.client.port</name>
    <value>2181</value>
  </property>
  <property>
    <name>hive.zookeeper.namespace</name>
    <value>hive_zookeeper_namespace_hive</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>hdfs127,hdfs128,hdfs129</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
  </property>
  <property>
    <name>hive.cluster.delegation.token.store.class</name>
    <value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value>
  </property>
  <property>
    <name>hive.server2.enable.doAs</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.use.SSL</name>
    <value>false</value>
  </property>
  <property>
    <name>spark.shuffle.service.enabled</name>
    <value>true</value>
  </property>
  <!-- Strict-mode checks. -->
  <property>
    <name>hive.strict.checks.orderby.no.limit</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.strict.checks.no.partition.filter</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.strict.checks.type.safety</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.strict.checks.cartesian.product</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.strict.checks.bucketing</name>
    <value>true</value>
  </property>
</configuration>
写
预览
Markdown
格式
0%
请重试
或
附加一个文件
附加文件
取消
您添加了
0
人
到此讨论。请谨慎行事。
先完成此消息的编辑!
取消
想要评论请
注册
或
登录