########################ElasticSearch#######################
Environment:
192.168.125.200 elasticsearch+logstash+kibana node-1
192.168.125.201 elasticsearch+logstash node-2
192.168.125.202 nginx+filebeat node-3
192.168.125.203 nginx+filebeat node-4
Prerequisites:
All hosts must be able to resolve each other's hostnames.
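For example, appending the following entries to /etc/hosts on every host would satisfy this (the hostnames simply match the node names listed above):
[root@localhost ~]# cat >> /etc/hosts <<EOF
192.168.125.200 node-1
192.168.125.201 node-2
192.168.125.202 node-3
192.168.125.203 node-4
EOF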
1. Install elasticsearch. In a lab environment each virtual machine needs at least 1 GB of memory.
(1) Configure the yum repository for the ELK 6.x packages; where to save the repo definition is noted right after the block.
[elk-6]
name=elk-6
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
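The repo definition is usually saved as a file under /etc/yum.repos.d/ (the filename below is only an example), followed by a cache refresh:
[root@localhost ~]# vi /etc/yum.repos.d/elk-6.repo    # paste the [elk-6] section above
[root@localhost ~]# yum clean all && yum makecache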
(2) Raise the open file descriptor and process limits.
[root@localhost ~]# cat /etc/security/limits.conf
* soft nofile 65536
* hard nofile 131072
* soft nproc 2048
* hard nproc 4096
[root@localhost ~]# cat /etc/security/limits.d/90-nproc.conf
* soft nproc 4096 // raised to be greater than 2048
root soft nproc unlimited
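After logging in again, the new limits can be verified with the shell's ulimit builtin (values should match the edits above):
[root@localhost ~]# ulimit -n    # open files, expected 65536
[root@localhost ~]# ulimit -u    # max user processes, expected 4096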
(3) Install elasticsearch with yum. Java 8 is also required; install it yourself, it is not covered in detail here.
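On CentOS/RHEL a minimal installation might look like the following (the OpenJDK package name is an assumption; any Java 8 runtime will do):
[root@localhost ~]# yum install -y java-1.8.0-openjdk
[root@localhost ~]# yum install -y elasticsearch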
(4) Configure elasticsearch.
Tune the JVM memory and GC settings:
[root@localhost ~]# cat /etc/elasticsearch/jvm.options
## GC configuration
-XX:+UseConcMarkSweepGC
-XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly
-XX:ParallelGCThreads=1 // sets the number of parallel GC threads
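Because the lab VMs only have 1 GB of RAM, the heap size in the same jvm.options file is normally lowered as well (the 512m values below are an assumption for a 1 GB machine; the 6.x default is 1g):
-Xms512m
-Xmx512m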
Edit the elasticsearch config file. Nodes 200 and 201 use the same configuration except for node.name and network.host; node-2's values are shown right after the block.
[root@localhost ~]# grep -v ^#.* /etc/elasticsearch/elasticsearch.yml
cluster.name: ELK
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
network.host: 192.168.125.200
discovery.zen.ping.unicast.hosts: ["192.168.125.200", "192.168.125.201"]
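On node-2 (192.168.125.201) the same file differs only in these two lines:
node.name: node-2
network.host: 192.168.125.201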
Start elasticsearch and confirm that it is listening:
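On a systemd-based host the service is typically started like this (a sketch; adapt to your init system):
[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl enable elasticsearch
[root@localhost ~]# systemctl start elasticsearch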
[root@localhost ~]# ss -tunlp
9200/9300
9300 is the TCP transport port: cluster traffic between nodes and TransportClient connections both go through it. 9200 is the HTTP port that serves the RESTful API.
Ways to check the cluster state:
1. [root@node-1 ~]# curl -XGET 'http://192.168.125.200:9200/_cat/nodes?v'
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
192.168.125.201 16 93 93 2.55 1.54 0.64 mdi - node-2
192.168.125.200 17 77 90 2.85 1.64 0.67 mdi * node-1
2. [root@node-1 ~]# curl -XGET 'http://192.168.125.200:9200/_cluster/state/nodes?pretty'
{
"cluster_name" : "ELK",
"compressed_size_in_bytes" : 287,
"nodes" : {
"22ItZADpTjym_G0aQeahqg" : {
"name" : "node-2",
"ephemeral_id" : "RSPI-3_gQnKhN0OH-FslHw",
"transport_address" : "192.168.125.201:9300",
"attributes" : { }
},
"G63XiHorRr2PHDAWUaOmuQ" : {
"name" : "node-1",
"ephemeral_id" : "KeuW_0oEQBOaQjn5CU4jzw",
"transport_address" : "192.168.125.200:9300",
"attributes" : { }
}
}
}
3. Query the master node
[root@node-1 ~]# curl -XGET 'http://192.168.125.200:9200/_cluster/state/master_node?pretty'
{
"cluster_name" : "ELK",
"compressed_size_in_bytes" : 287,
"master_node" : "G63XiHorRr2PHDAWUaOmuQ"
}
4. Check the cluster health
[root@node-1 ~]# curl -XGET 'http://192.168.125.200:9200/_cluster/health?pretty'
{
"cluster_name" : "ELK",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 2,
"number_of_data_nodes" : 2,
"active_primary_shards" : 0,
"active_shards" : 0,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
[root@node-1 ~]# curl -XGET 'http://192.168.125.200:9200/_cat/health?v'
epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1522313584 16:53:04 ELK green 2 2 0 0 0 0 0 0 - 100.0%
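For scripted checks, the cluster health API also accepts a wait_for_status parameter, which blocks until the requested status (or the timeout) is reached, e.g.:
[root@node-1 ~]# curl -XGET 'http://192.168.125.200:9200/_cluster/health?wait_for_status=green&timeout=30s&pretty'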