写在前面的话,安装这些东西的话,最好用脚本,不容易出错,下面写的是手动部署的过程
环境初始化
一定要先挂载好磁盘
# Set the hostname for the current boot, then persist it (CentOS 6 style).
# The original note mashed both commands onto one line.
hostname ls-2pa-au-01
vi /etc/sysconfig/network   # set: HOSTNAME=ls-2pa-au-01
centos7
hostnamectl set-hostname xxx
vi /etc/hosts
172.24.x.a ls-2pa-au-01
172.24.x.b ls-2pb-au-02
172.24.x.c ls-2pc-au-03
192.168.x.x mirror.workplace.com
打通
# Passwordless SSH from the control node to every node (as root).
ssh-keygen -t rsa
for i in {a..c};do ssh-copy-id root@172.24.2.$i;done
# Verify key-based login works on each node.
for i in {a..c};do ssh 172.24.2.$i "echo Ok";done
关于端口
# Port changes belong in the SERVER config (Port directive); /etc/ssh/ssh_config
# is the client config. The time-sync script later uses `ssh -p3222`, so the
# custom port must be set in sshd_config on every node.
vi /etc/ssh/sshd_config
for i in {b..c}; do scp /etc/hosts 172.24.2.$i:/etc/; done
重启
# Reboot the other nodes. The original loop used literal last octets
# {192..193}, inconsistent with the {a..c} placeholders used everywhere else.
for i in {b..c}; do ssh 172.24.2.$i "init 6"; done
yum
wget 安装
./configure
make
make install
make clean
cd /etc/yum.repos.d
cat /etc/redhat-release
centos7
wget http://mirror.workplace.com/yum/centos/CentOS-Base.repo
wget http://mirror.workplace.com/yum/epel/epel.repo
wget http://mirror.workplace.com/yum/ambari/centos7/ambari.repo
wget http://mirror.workplace.com/yum/HDP/centos7/2.x/updates/2.6.1.0/hdp.repo
wget http://mirror.workplace.com/yum/HDP-UTILS-1.1.0.21/repos/centos7/hdp-util.repo
for i in {b..c}; do scp -r /etc/yum.repos.d 172.24.2.$i:/etc/; done
# --- NTP time sync across the cluster ---
ntp
for i in {a..c}; do ssh 172.24.2.$i "yum install ntp -y"; done
# The loop below is the intended content of /opt/time_sync.sh.
vi /opt/time_sync.sh
# NOTE(review): -p3222 implies sshd listens on a custom port -- confirm it is
# configured on every node before relying on this script.
for i in {a..c}; do ssh -p3222 172.24.2.$i 'cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime; ntpdate -u 172.24.2.10; date'; done
chmod +x /opt/time_sync.sh
# cron entry: force a sync every minute.
# NOTE(review): running ntpdate from cron while ntpd is active can fight over
# the clock -- pick one mechanism.
crontab -e
*/1 * * * * /opt/time_sync.sh
for i in {a..c}; do ssh 172.24.2.$i "service ntpd start"; done
for i in {a..c}; do ssh 172.24.2.$i "chkconfig ntpd on"; done
检查时间是否正常
for i in {a..c}; do ssh 172.24.2.$i "date"; done
limit参数初始化
for i in {a..c}; do ssh 172.24.2.$i "echo '* soft nofile 100000' >> /etc/security/limits.conf && echo '* hard nofile 100001' >> /etc/security/limits.conf && echo '* soft nproc 65535' >> /etc/security/limits.conf && echo '* hard nproc 65535' >> /etc/security/limits.conf"; done
防火墙
for i in {a..c}; do ssh 172.24.2.$i "service iptables stop && service iptables status"; done
jdk
wget http://mirror.workplace.com/JDK/jdk1.8.0_131.tar.gz
# Extract first -- the original notes skipped this step, so the symlink
# below would have pointed at a nonexistent directory.
tar xf jdk1.8.0_131.tar.gz -C /opt/
ln -s /opt/jdk1.8.0_131 /opt/java
# Repack (JDK dir plus the java symlink) and push to the other nodes.
tar -zcvf jdk1.8.0_131.tar.gz java jdk1.8.0_131/
for i in {b..c}; do scp jdk1.8.0_131.tar.gz 172.24.2.$i:/opt/; done
# Unpack on each node and remove the tarball (-f is enough for a plain file).
for i in {b..c}; do ssh 172.24.2.$i "tar xf /opt/jdk1.8.0_131.tar.gz -C /opt/ && rm -f /opt/jdk1.8.0_131.tar.gz && ls -al /opt/"; done
环境变量
# Append JAVA_HOME/CLASSPATH/PATH exports to /etc/profile on every node.
# The $ signs MUST be escaped: in the original, $JAVA_HOME and $PATH sat
# inside the outer double quotes and expanded on the LOCAL machine, baking
# the control node's (possibly empty) values into every node's /etc/profile.
for i in {a..c}; do ssh 172.24.2.$i "echo 'export JAVA_HOME=/opt/java' >>/etc/profile; echo 'export CLASSPATH=:\$JAVA_HOME/lib' >>/etc/profile; echo 'export PATH=\$PATH:\$JAVA_HOME/bin' >>/etc/profile"; done
# 'ssh host "source /etc/profile"' alone is a no-op (the shell exits
# immediately); source and verify in the same session instead.
for i in {a..c}; do ssh 172.24.2.$i "source /etc/profile && java -version"; done
mysql-connector
for i in {a..c};do ssh 172.24.2.$i "yum install mysql-connector-java -y ";done
# Upgrade the connector to 5.1.42.
wget http://mirror.workplace.com/mysql/5.6.28/mysql-connector-java-5.1.42.jar
# Plain file: -f is enough (the original used rm -r).
for i in {a..c};do ssh 172.24.2.$i "rm -f /usr/share/java/mysql-connector-java.jar";done
cp /opt/mysql-connector-java-5.1.42.jar /usr/share/java/
cd /usr/share/java/
for i in {a..c}; do scp /usr/share/java/mysql-connector-java-5.1.42.jar 172.24.2.$i:/usr/share/java/; done
# Recreate the canonical symlink Ambari/Hive expect.
for i in {a..c};do ssh 172.24.2.$i "ln -s /usr/share/java/mysql-connector-java-5.1.42.jar /usr/share/java/mysql-connector-java.jar";done
mysql
# NOTE(review): these are el6 packages, but the notes below target CentOS 7
# (the install step at the bottom of this section lists el7 filenames).
# Fetch the el7 builds instead if the mirror hosts them -- verify.
# Listed in rpm -ivh install order: common -> libs -> client -> server (+devel).
wget http://mirror.workplace.com/mysql/5.6.28/mysql-community-common-5.6.28-2.el6.x86_64.rpm
wget http://mirror.workplace.com/mysql/5.6.28/mysql-community-libs-5.6.28-2.el6.x86_64.rpm
wget http://mirror.workplace.com/mysql/5.6.28/mysql-community-client-5.6.28-2.el6.x86_64.rpm
wget http://mirror.workplace.com/mysql/5.6.28/mysql-community-server-5.6.28-2.el6.x86_64.rpm
wget http://mirror.workplace.com/mysql/5.6.28/mysql-community-devel-5.6.28-2.el6.x86_64.rpm
# NOTE(review): do NOT pre-install mariadb-libs on CentOS 7 -- it conflicts
# with mysql-community-libs, and the steps below remove it again with
# `rpm -e --nodeps mariadb-libs...`. Only the real dependencies are needed.
# yum install mariadb-libs -y
yum install perl perl-DBI libaio* -y
原系统若有相关,会报conflict错误
需要yum remove 删除冲突版本即可
卸载系统中原来的mysql相关库(如果有),rpm -qa| grep mysql, rpm -e --nodeps,注意 CentOS7需 rpm -e --nodeps mariadb-libs-5.5.52-1.el7.x86_64
依次安装所需mysql组件: rpm -ivh ,顺序(以CentOS7为例): mysql-community-common-5.6.28-2.el7.x86_64.rpm,mysql-community-libs-5.6.28-2.el7.x86_64.rpm,mysql-community-client-5.6.28-2.el7.x86_64.rpm,mysql-community-server-5.6.28-2.el7.x86_64.rpm
初始化mysql(设置初始数据库和用户权限):
修改my.cnf,设置数据目录到/opt: datadir=/opt/mysql
启动mysql并设置自动启动:
对于CentOS5/6: service mysql start;chkconfig mysql on
对于CentOS7: systemctl start mysqld.service;systemctl enable mysqld.service
设置mysqlroot密码及安全:
/usr/bin/mysql_secure_installation
性能优化(生产或并发大的服务器,需进行此设置)
/usr/bin/mysql_secure_installation
(mysqladmin -u root password "newpwd" 正常不需要,用上面的即可)
use mysql;
UPDATE user SET Password = password ( 'workplace' ) WHERE User = 'root' ;
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'workplace';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY 'workplace';
在安装hdp后再去动my.cnf,或者说ambari和服务所用的mysql分开
vim /etc/my.cnf
# /etc/my.cnf -- the original note had the whole file collapsed onto one line;
# reformatted below with identical settings.
[client]
#password = your_password
port = 3306
socket = /tmp/mysql.sock
default-character-set = utf8mb4

# The MySQL server
[mysqld]
port = 3306
socket = /tmp/mysql.sock
skip-external-locking
key_buffer_size = 16M
max_allowed_packet = 1M
table_open_cache = 64
sort_buffer_size = 512K
net_buffer_length = 8K
read_buffer_size = 256K
read_rnd_buffer_size = 512K
myisam_sort_buffer_size = 8M
# NOTE(review): the original also set character_set_server=utf8, which the
# later utf8mb4 value shadows (last assignment wins) -- only utf8mb4 is kept.
# NOTE(review): init_connect forces utf8 while the server is utf8mb4 --
# confirm this mismatch is intentional.
init_connect='SET NAMES utf8'
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
#skip-networking

# Replication Master Server (default)
# binary logging is required for replication
log-bin=mysql-bin
# binary logging format - mixed recommended
binlog_format=mixed
# required unique id between 1 and 2^32 - 1
# defaults to 1 if master-host is not set
# but will not function as a master if omitted
server-id = 1

[mysqldump]
quick
max_allowed_packet = 16M

[mysql]
no-auto-rehash
default-character-set = utf8mb4
# Remove the next comment character if you are not familiar with SQL
#safe-updates

[myisamchk]
key_buffer_size = 20M
sort_buffer_size = 20M
read_buffer = 2M
write_buffer = 2M

[mysqlhotcopy]
interactive-timeout
ambari
创建ambari用户
for i in {a..c}; do ssh 172.24.2.$i "groupadd ambari && useradd -g ambari -d /opt/ambari ambari"; done
for i in {a..c}; do ssh 172.24.2.$i "echo 'workplace' | passwd --stdin ambari"; done
ambari用户之间打通key
ssh-keygen -t rsa
for i in {a..c};do ssh-copy-id ambari@172.24.2.$i;done
for i in {a..c};do ssh 172.24.2.$i "echo Ok";done
ambari数据库和hive数据库创建
mysql -uroot -p
create database ambari character set utf8;
create user ambari identified by 'workplace';
grant all privileges on ambari.* to 'ambari'@'localhost' identified BY 'workplace';
grant all privileges on ambari.* to 'ambari'@'172.24.2.a' identified BY 'workplace';
grant all privileges on ambari.* to 'ambari'@'ls-2pa-au-01' identified BY 'workplace';
grant all privileges on ambari.* to 'ambari'@'%' identified BY 'workplace';
create database hive character set utf8;
create user hive identified by 'workplace';
grant all privileges on hive.* to 'hive'@'localhost' identified BY 'workplace';
grant all privileges on hive.* to 'hive'@'172.24.2.a' identified BY 'workplace';
grant all privileges on hive.* to 'hive'@'ls-2pa-au-01' identified BY 'workplace';
grant all privileges on hive.* to 'hive'@'%' identified BY 'workplace';
flush privileges;
ambari安装
yum install ambari-server
[root@ls-2pa-au-01 opt]# ambari-server setup
Using python /usr/bin/python
Setup ambari-server
Checking SELinux...
SELinux status is 'disabled'
Customize user account for ambari-server daemon [y/n] (n)? y
Enter user account for ambari-server daemon (root):ambari
Adjusting ambari-server permissions and ownership...
Checking firewall status...
Checking JDK...
[1] Oracle JDK 1.8 + Java Cryptography Extension (JCE) Policy Files 8
[2] Oracle JDK 1.7 + Java Cryptography Extension (JCE) Policy Files 7
[3] Custom JDK
==============================================================================
Enter choice (1): 3
WARNING: JDK must be installed on all hosts and JAVA_HOME must be valid on all hosts.
WARNING: JCE Policy files are required for configuring Kerberos security. If you plan to use Kerberos,please make sure JCE Unlimited Strength Jurisdiction Policy Files are valid on all hosts.
Path to JAVA_HOME: /opt/java
Validating JDK on Ambari Server...done.
Completing setup...
Configuring database...
Enter advanced database configuration [y/n] (n)? y
Configuring database...
==============================================================================
Choose one of the following options:
[1] - PostgreSQL (Embedded)
[2] - Oracle
[3] - MySQL / MariaDB
[4] - PostgreSQL
[5] - Microsoft SQL Server (Tech Preview)
[6] - SQL Anywhere
[7] - BDB
==============================================================================
Enter choice (1): 3
Hostname (localhost): 172.24.2.a
Port (3306):
Database name (ambari):
Username (ambari):
Enter Database Password (bigdata):
Re-enter password:
Configuring ambari database...
Configuring remote database connection properties...
WARNING: Before starting Ambari Server, you must run the following DDL against the database to create the schema: /var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql
Proceed with configuring remote database connection properties [y/n] (y)? y
Extracting system views...
ambari-admin-2.5.1.0.159.jar
...........
Adjusting ambari-server permissions and ownership...
Ambari Server 'setup' completed successfully.
ambari数据库脚本导入
# Import the Ambari schema. The ambari user was created above with password
# 'workplace' -- the original note had a typo ('baifendain') here.
mysql -uambari -pworkplace
use ambari;
source /var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql;
启动
ambari-server start
ambari-server setup --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar
HDP安装
界面化安装,很简单,点一点即可
ambari-server服务器ip:8080
账号:admin
密码:admin
lanch install wizard
name
Use Local Repository
http://mirror.workplace.com/yum/HDP/centos7/2.x/updates/2.6.1.0/
http://mirror.workplace.com/yum/HDP-UTILS-1.1.0.21/repos/centos7/
private key
端口注意
安装的时候要注意hdfs路径设置
注意看能不能将smartsense删除
create database ranger character set utf8;
create user ranger identified by 'workplace';
grant all privileges on ranger.* to 'ranger'@'localhost' identified BY 'workplace';
grant all privileges on ranger.* to 'ranger'@'172.24.2.a' identified BY 'workplace';
grant all privileges on ranger.* to 'ranger'@'ls-2pa-au-01' identified BY 'workplace';
grant all privileges on ranger.* to 'ranger'@'%' identified BY 'workplace';
grant all privileges on ranger.* to 'rangeradmin'@'localhost' identified BY 'workplace';
grant all privileges on ranger.* to 'rangeradmin'@'172.24.2.a' identified BY 'workplace';
grant all privileges on ranger.* to 'rangeradmin'@'ls-2pa-au-01' identified BY 'workplace';
grant all privileges on ranger.* to 'rangeradmin'@'%' identified BY 'workplace';
grant all privileges on ranger.* to 'rangeradmin'@'%' identified BY 'workplace' with grant option;
grant all privileges on ranger.* to 'root'@'%' identified BY 'workplace' with grant option;
flush privileges;
ranger、及ha enable在集群安装以后根据提示安装即可
ELK
环境
# Raise nofile/nproc/memlock limits for Elasticsearch.
# NOTE(review): this appends on top of the entries added during base init;
# later duplicate entries win, but consider deduplicating limits.conf.
for i in {a..c}; do ssh 172.24.2.$i "echo '* soft nofile 1048576' >> /etc/security/limits.conf && echo '* hard nofile 1048576' >> /etc/security/limits.conf && echo '* soft nproc 1048576' >> /etc/security/limits.conf && echo '* hard nproc 1048576' >> /etc/security/limits.conf && echo '* hard memlock 67108864' >> /etc/security/limits.conf && echo '* soft memlock 67108864' >> /etc/security/limits.conf"; done
# Verify the effective limits:
ulimit -d
ulimit -m
ulimit -n
vi /etc/security/limits.conf
# On CentOS 7 the nproc ceiling also lives here (set it to 1048576):
vi /etc/security/limits.d/20-nproc.conf
for i in {a..c}; do ssh 172.24.2.$i "/etc/init.d/iptables stop; chkconfig iptables off; service iptables status"; done
# Fixed quoting: in the original the inner double quotes terminated the outer
# pair, and the remote command only reassembled by accident of ssh arg joining.
for i in {a..c}; do ssh 172.24.2.$i 'echo "vm.max_map_count = 262144" >> /etc/sysctl.conf; sysctl -p'; done
node
cd /opt
wget http://mirror.workplace.com/node/node-v6.11.0.tar.gz
tar -zxvf node-v6.11.0.tar.gz
ln -s /opt/node-v6.11.0 /opt/node
tar -zcvf node.tar.gz node-v6.11.0 node
for i in {b..c}; do scp node.tar.gz 172.24.2.$i:/opt/; done
for i in {b..c}; do ssh 172.24.2.$i "tar xf /opt/node.tar.gz -C /opt/ && ls -al /opt/"; done
vi /etc/profile
export NODE_HOME=/opt/node
export PATH=${NODE_HOME}/bin:${PATH}
for i in {b..c}; do scp /etc/profile 172.24.2.$i:/etc/; done
for i in {b..c}; do ssh 172.24.2.$i "source /etc/profile && node -v && npm -v"; done
es
wget http://mirror.workplace.com/elasticsearch/elasticsearch-5.2.2.tar.gz
tar xf elasticsearch-5.2.2.tar.gz
创建用户
for i in {a..c}; do ssh 172.24.2.$i "groupadd sys && useradd -g sys -d /opt/syshome sys"; done
for i in {a..c}; do ssh 172.24.2.$i "echo 'workplace' | passwd --stdin sys"; done
ln -s /opt/elasticsearch-5.2.2 /opt/elasticsearch
chown sys.sys elasticsearch-5.2.2 elasticsearch -R
su sys
vi /opt/elasticsearch/config/elasticsearch.yml
cluster.name: wind_index
node.name: ls-2pa-au-01
node.master: true
node.data: false
path.data: /opt/data1/elasticsearch/data, /opt/data2/elasticsearch/data, /opt/data3/elasticsearch/data, /opt/data4/elasticsearch/data, /opt/data5/elasticsearch/data, /opt/data6/elasticsearch/data, /opt/data7/elasticsearch/data, /opt/data8/elasticsearch/data, /opt/data9/elasticsearch/data, /opt/data10/elasticsearch/data, /opt/data11/elasticsearch/data, /opt/data12/elasticsearch/data
path.logs: /opt/elasticsearch/logs
network.host: 192.168.x.a
http.port: 9200
transport.tcp.port: 9300
discovery.zen.ping.unicast.hosts:
- ls-2pa-au-01:9300
- ls-2pb-au-02:9300
- ls-2pc-au-03:9300
discovery.zen.minimum_master_nodes: 2
#discovery.zen.fd.ping_interval: 60s
#discovery.zen.fd.ping_timeout: 300s
#discovery.zen.fd.ping_retries: 6
http.cors.enabled: true
http.cors.allow-origin: "*"
bootstrap.system_call_filter: false
#transport.type: netty3
#http.type: netty3
tar -zcvf elasticsearch.tar.gz elasticsearch elasticsearch-5.2.2
for i in {b..c}; do scp elasticsearch.tar.gz 172.24.2.$i:/opt/; done
for i in {b..c}; do ssh 172.24.2.$i "tar xf /opt/elasticsearch.tar.gz -C /opt/ && ls -al /opt/"; done
vim 记得改名和地址
/opt/elasticsearch/bin/elasticsearch -d
tail -100f /opt/elasticsearch/logs/wind_index.log
es-head
wget http://mirror.workplace.com/elasticsearch/elasticsearch-head.tar.gz
tar -zxvf elasticsearch-head.tar.gz
chown sys.sys elasticsearch-head/ -R
su sys
cd /opt/elasticsearch-head/
npm install
#网络不稳定,出现error多尝试几次,时间会比较长,耐心等待。
注:报相关错,可以尝试 npm install phantomjs-prebuilt@2.1.15 --ignore-scripts
npm install grunt-cli --save
npm install grunt --save
vi Gruntfile.js 进去改地址即可
vi src/app/app.js 进去改地址即可
vi _site/app.js 进去改地址即可
nohup /opt/elasticsearch-head/node_modules/grunt/bin/grunt server &
关闭ipv6 不出问题,不需要做这步
编辑文件/etc/sysctl.conf,
vi /etc/sysctl.conf
添加下面的行:
net.ipv6.conf.all.disable_ipv6 =1
net.ipv6.conf.default.disable_ipv6 =1
如果你想要为特定的网卡禁止IPv6,比如,对于eth0,添加下面的行。
vi /etc/sysconfig/network-scripts/ifcfg-eth0
IPV6INIT="no"
保存并退出文件。
执行下面的命令来使设置生效。
sysctl -p
ifconfig 看不到 inet6 即可
重启es要注意
rm -rf data/* logs/*
之后再重启
kibana
wget http://mirror.workplace.com/kibana/kibana-5.2.2-linux-x86_64.tar.gz
tar -zxvf kibana-5.2.2-linux-x86_64.tar.gz
ln -s /opt/kibana-5.2.2-linux-x86_64 /opt/kibana
chown sys.sys kibana-5.2.2-linux-x86_64 -R
chown sys.sys kibana -R
tar -zcvf kibana.tar.gz kibana-5.2.2-linux-x86_64 kibana
for i in {b..c}; do scp kibana.tar.gz 172.24.2.$i:/opt/; done
for i in {b..c}; do ssh 172.24.2.$i "tar xf /opt/kibana.tar.gz -C /opt/ && ls -al /opt/"; done
logstash filebeat
wget http://mirror.workplace.com/logstash/logstash-5.2.2.tar.gz
wget http://mirror.workplace.com/filebeat/filebeat-5.2.2-linux-x86_64.tar.gz
tar -zxf logstash-5.2.2.tar.gz
tar -zxf filebeat-5.2.2-linux-x86_64.tar.gz
ln -s /opt/logstash-5.2.2 /opt/logstash
ln -s /opt/filebeat-5.2.2-linux-x86_64 /opt/filebeat
chown sys.sys logstash-5.2.2 logstash -R
chown sys.sys filebeat-5.2.2-linux-x86_64 filebeat -R
vi /opt/logstash/etc/logstash.conf
input {
  tcp {
    port => 5000
    type => syslog
  }
  # Fixed: "sys" is not a Logstash input plugin -- the standard syslog recipe
  # pairs the tcp input with a udp input on the same port.
  udp {
    port => 5000
    type => syslog
  }
}
filter {
  if [type] == "syslog" {
    grok {
      # Fixed: the non-capturing group must be written "(?:" -- the original
      # "(? :" (with a space) is an invalid regex and fails to compile.
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    date {
      match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
  }
}
output {
  elasticsearch { hosts => ["172.24.2.c:9200"] }
  stdout { codec => rubydebug }
}
vi /opt/filebeat/filebeat.yml
paths:
- /var/log/*.log
output.elasticsearch:
hosts: ["172.24.2.c:9200"]
output.logstash:
hosts: ["172.24.2.c:5044"]
vi /etc/profile 添加
export NODE_HOME=/opt/node
export PATH=${NODE_HOME}/bin:${PATH}
export ELASTICSEARCH_HOME=/opt/elasticsearch
export PATH=${ELASTICSEARCH_HOME}/bin:${PATH}
export KIBANA_HOME=/opt/kibana
export PATH=${KIBANA_HOME}/bin:${PATH}
export LOGSTASH_HOME=/opt/logstash
export PATH=${LOGSTASH_HOME}/bin:${PATH}
tar -zcf logstash.tar.gz logstash-5.2.2 logstash
tar -zcf filebeat.tar.gz filebeat-5.2.2-linux-x86_64 filebeat
for i in {b..c}; do scp logstash.tar.gz filebeat.tar.gz 172.24.2.$i:/opt/; done
for i in {b..c}; do ssh 172.24.2.$i "tar xf /opt/logstash.tar.gz -C /opt/ && ls -al /opt/"; done
for i in {b..c}; do ssh 172.24.2.$i "tar xf /opt/filebeat.tar.gz -C /opt/ && ls -al /opt/"; done
for i in {b..c}; do scp /etc/profile 172.24.2.$i:/etc; done
for i in {b..c}; do ssh 172.24.2.$i "source /etc/profile && logstash -V"; done
添加logstash、filebeat配置
su sys
cd /opt/logstash
vi test-pipeline.conf
input {
beats {
port => "9999"
}
}
# The filter part of this file is commented out to indicate that it is
# optional.
# filter {
#
# }
#
filter {
grok {
match => { "message" => ["%{LOGLEVEL:logLevel}\]\[%{NUMBER:nanoTime:integer}\](?<V1>.*)\[jobId=(?<jobId>[\w\d\-_]+)\](?<V2>.*)", "%{LOGLEVEL:logLevel}\]\[%{NUMBER:nanoTime:integer}\](?<V1>.)(?<V2>.*)"] }
add_field => { "nest_msg" => "[%{logLevel}]%{V1}%{V2}" }
remove_field => [ "message", "V1", "V2" ]
}
if ![jobId] {
drop {}
}
mutate {
convert => { "nanoTime" => "integer" }
}
}
output {
stdout {
codec => json
}
elasticsearch {
hosts => [ "172.24.x.a:9200", "172.24.x.b:9200", "172.24.x.c:9200" ]
codec => json
index => "test-logstash-%{+YYYY.MM.dd}"
}
}
vim udp-pipeline.conf 其实退出复制蛮好,可以翻
input {
beats {
port => "9998"
}
}
# The filter part of this file is commented out to indicate that it is
# optional.
# filter {
#
# }
#
filter {
grok {
match => { "message" => ["%{LOGLEVEL:logLevel}\]\[%{NUMBER:nanoTime:integer}\] %{TIMESTAMP_ISO8601:time}.*\[user=(?<user>[\w\d\-_\.]+)\]\[tenant=(?<tenant>[\w\d\-_\.]+)\]\[project=(?<project>[\w\d\-_\.]+)\]\[ip=%{IPORHOST:ip}\]\[page=(?<page>[\w\d\-_\.]+)\]\[user_action=(?<user_action>[\w\d\-_\.]+)\]\[message_key=(?<message_key>[\w\d\-_\.]+)\]\[params=(?<params>.*)\]"] }
remove_field => [ "YEAR", "MONTHNUM", "MONTHDAY", "HOUR", "MINUTE", "SECOND", "HOSTNAME", "IP", "IPV4", "IPV6" ]
}
if ![message_key] {
drop {}
}
date {
match => [ "time", "yyyy-MM-dd HH:mm:ss.SSS" ]
}
mutate {
convert => { "nanoTime" => "integer" }
}
}
output {
stdout {
codec => json
}
elasticsearch {
hosts => [ "172.24.x.a:9200", "172.24.x.b:9200", "172.24.x.c:9200" ]
codec => json
index => "sys-logstash-%{+YYYY.MM.dd}"
}
}
cd /opt/filebeat
vi test-filebeat.yml
filebeat.prospectors:
- input_type: log
multiline.timeout: 1s
paths:
- /opt/test/logs/exec-server*.log
multiline:
pattern: '^\['
negate: true
match: after
# exclude_lines: ["^\\[DEBUG\\]"]
output.logstash:
hosts: ["172.24.2.c:9999"]
# Fixed filename: the original reused "test-filebeat.yml" for this second
# config, but the startup command runs filebeat with -c udp-filebeat.yml.
vi udp-filebeat.yml
filebeat.prospectors:
- input_type: log
multiline.timeout: 1s
paths:
- /opt/test/logs/data-warehouse*.log
multiline:
pattern: '^\['
negate: true
match: after
# exclude_lines: ["^\\[DEBUG\\]"]
output.logstash:
hosts: ["172.24.2.c:9998"]
启动命令(注意配置文件要指定好位置)
nohup bin/logstash -f udp-pipeline.conf &
nohup ./filebeat -e -c udp-filebeat.yml -d publish &
nginx
编译安装
yum -y install make zlib zlib-devel gcc-c++ libtool openssl openssl-devel
wget http://mirror.workplace.com/pcre/pcre-8.35.tar.gz
tar xf pcre-8.35.tar.gz
cd pcre-8.35
./configure
make && make install
pcre-config --version
wget http://mirror.workplace.com/nginx/nginx-1.10.2.tar.gz
tar xf nginx-1.10.2.tar.gz
cd nginx-1.10.2
./configure --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module --with-pcre=/opt/pcre-8.35
make && make install
/usr/local/nginx/sbin/nginx -v
/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
rpm安装
mkdir nginx
cd nginx/
wget http://mirror.workplace.com/nginx/nginx-1.13.4-1.el7.ngx.x86_64.rpm
rpm -ivh nginx-1.13.4-1.el7.ngx.x86_64.rpm
vi /etc/nginx/nginx.conf
/usr/sbin/nginx -t
nginx.conf 中http里添加
server {
listen 12175;
server_name 172.24.2.a;
#access_log logs/dw.access.log main;
client_max_body_size 256m;
proxy_read_timeout 1800s; #读取超时 默认为60秒
proxy_send_timeout 1800s; #发送超时 默认为60秒
location ^~ /api/v1 {
proxy_pass http://172.24.2.a:9001;
}
location / {
root /opt/sys-web;
try_files $uri $uri/ /index.html;
}
}
ambari报checkpoint错误
重启hdfs,按提示进行checkpoint
可能是缺少hdfs文件夹,在standby上
建一个即可
mkdir /home/hdfs
cd /home
chown hdfs.hadoop hdfs -R