
ELK Real-Time Log Analysis Platform 5.0: Installation and Configuration from Source


Some readers have told me that the RPM-based ELK tutorial does not fit their needs: many production environments have no Internet access and run different operating system versions, so they asked for a source-install guide. Here is the full process for installing ELK 5.0 from source. My skills are limited, so bear with me! The installation packages are attached at the end.

1. Configure the Java environment variables

# mkdir -p /usr/local/java/  
# cd /usr/local/java/  
# tar zxvf /data/elk5.0/jdk-8u111-linux-x64.tar.gz  

# cat >>/etc/profile<<'EOF'  

export JAVA_HOME=/usr/local/java/jdk1.8.0_111  
export PATH=$PATH:$JAVA_HOME/bin  
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar  
EOF  

# source /etc/profile  
# java -version  
java version "1.8.0_111"  
Java(TM) SE Runtime Environment (build 1.8.0_111-b14)  
Java HotSpot(TM) 64-Bit Server VM (build 25.111-b14, mixed mode)

2. Install Elasticsearch

 

    # mkdir -p /data/PRG/  
    # cd /data/PRG/  
    # tar zxvf /data/elk5.0/elasticsearch-5.0.0.tar.gz  
    # mv elasticsearch-5.0.0 elasticsearch  
    # useradd elasticsearch -s /sbin/nologin  
    # chown elasticsearch.elasticsearch /data/PRG/elasticsearch/

Add an init script:

vi /etc/init.d/elasticsearch

#!/bin/sh  
#  
# elasticsearch <summary>  
#  
# chkconfig:   2345 80 20  
# description: Starts and stops a single elasticsearch instance on this system  
#  


### BEGIN INIT INFO  
# Provides: Elasticsearch  
# Required-Start: $network $named  
# Required-Stop: $network $named  
# Default-Start: 2 3 4 5  
# Default-Stop: 0 1 6  
# Short-Description: This service manages the elasticsearch daemon  
# Description: Elasticsearch is a very scalable, schema-free and high-performance search solution supporting multi-tenancy and near realtime search.  
### END INIT INFO  


#  
# init.d / servicectl compatibility (openSUSE)  
#  
if [ -f /etc/rc.status ]; then  
    . /etc/rc.status  
    rc_reset  
fi  


#  
# Source function library.  
#  
if [ -f /etc/rc.d/init.d/functions ]; then  
    . /etc/rc.d/init.d/functions  
fi  






# Sets the default values for elasticsearch variables used in this script  
export JAVA_HOME=/usr/local/java/jdk1.8.0_111  
ES_USER="elasticsearch"  
ES_GROUP="elasticsearch"  
name=elasticsearch  
ES_HOME="/data/PRG/elasticsearch"  
MAX_OPEN_FILES=65536  
MAX_MAP_COUNT=262144  
LOG_DIR="$ES_HOME/log/"  
DATA_DIR="$ES_HOME/lib/"  
CONF_DIR="$ES_HOME/config"  
mkdir -p $LOG_DIR  
chown -R elasticsearch.elasticsearch $ES_HOME  


PID_DIR="$ES_HOME/log"  


# Source the default env file  
ES_ENV_FILE="/etc/sysconfig/elasticsearch"  
if [ -f "$ES_ENV_FILE" ]; then  
    . "$ES_ENV_FILE"  
fi  


# CONF_FILE setting was removed  
if [ ! -z "$CONF_FILE" ]; then  
    echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."  
    exit 1  
fi  


exec="$ES_HOME/bin/elasticsearch"  
prog="elasticsearch"  
pidfile="$PID_DIR/${prog}.pid"  


export ES_HEAP_SIZE  
export ES_HEAP_NEWSIZE  
export ES_DIRECT_SIZE  
export ES_JAVA_OPTS  
export ES_GC_LOG_FILE  
export ES_STARTUP_SLEEP_TIME  
export JAVA_HOME  
export ES_INCLUDE  
ulimit -n $MAX_OPEN_FILES  
lockfile=$ES_HOME/log/$prog  


# backwards compatibility for old config sysconfig files, pre 0.90.1  
if [ -n $USER ] && [ -z $ES_USER ] ; then  
   ES_USER=$USER  
fi  


checkJava() {  
    if [ -x "$JAVA_HOME/bin/java" ]; then  
        JAVA="$JAVA_HOME/bin/java"  
    else  
        JAVA=`which java`  
    fi  


    if [ ! -x "$JAVA" ]; then  
        echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"  
        exit 1  
    fi  
}  


start() {  
    checkJava  
    [ -x $exec ] || exit 5  
    if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then  
        echo "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set"  
        return 7  
    fi  
    if [ -n "$MAX_OPEN_FILES" ]; then  
        ulimit -n $MAX_OPEN_FILES  
    fi  
    if [ -n "$MAX_LOCKED_MEMORY" ]; then  
        ulimit -l $MAX_LOCKED_MEMORY  
    fi  
    if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then  
        sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT  
    fi  
    export ES_GC_LOG_FILE  


    # Ensure that the PID_DIR exists (it is cleaned at OS startup time)  
    if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then  
        mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"  
    fi  
    if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then  
        touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile"  
    fi  


    cd $ES_HOME  
    echo -n $"Starting $prog: "  
    # if not running, start it up here, usually something like "daemon $exec"  
    daemon --user $ES_USER  --pidfile $pidfile $exec -p $pidfile -d  


    #daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.conf=$CONF_DIR  
    retval=$?  
    echo  
    [ $retval -eq 0 ] && touch $lockfile  
    return $retval  
}  


stop() {  
    echo -n $"Stopping $prog: "  
    # stop it here, often "killproc $prog"  
    killproc -p $pidfile -d 86400 $prog  
    retval=$?  
    echo  
    [ $retval -eq 0 ] && rm -f $lockfile  
    return $retval  
}  


restart() {  
    stop  
    start  
}  


reload() {  
    restart  
}  


force_reload() {  
    restart  
}  


rh_status() {  
    # run checks to determine if the service is running or use generic status  
    status -p $pidfile $prog  
}  


rh_status_q() {  
    rh_status >/dev/null 2>&1  
}  




case "$1" in  
    start)  
        rh_status_q && exit 0  
        $1  
        ;;  
    stop)  
        rh_status_q || exit 0  
        $1  
        ;;  
    restart)  
        $1  
        ;;  
    reload)  
        rh_status_q || exit 7  
        $1  
        ;;  
    force-reload)  
        force_reload  
        ;;  
    status)  
        rh_status  
        ;;  
    condrestart|try-restart)  
        rh_status_q || exit 0  
        restart  
        ;;  
    *)  
        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"  
        exit 2  
esac  
exit $?

 

    # chmod +x /etc/init.d/elasticsearch  
    
    # /etc/init.d/elasticsearch start  
    
    # /etc/init.d/elasticsearch status  
    elasticsearch (pid 20895) is running...  
    # netstat -ntlp |grep 9[2-3]00  
    tcp        0     0 :::9300                    :::*                        LISTEN      20895/java           
    tcp        0      0 :::9200                     :::*                        LISTEN      20895/java
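
Before moving on, it is worth confirming that the node answers over HTTP. X-Pack is not installed yet (see section 8), so no credentials are needed; a minimal check, assuming Elasticsearch is reachable on localhost:

    # curl 'http://localhost:9200/?pretty'       ### should return a JSON document with the cluster name and "number" : "5.0.0"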

3. Configure Elasticsearch

If the machine has less than 2 GB of memory, lower the JVM heap settings:

 

    # vim /data/PRG/elasticsearch/config/jvm.options  
    -Xms512m  
    -Xmx512m  
    
    # cat /data/PRG/elasticsearch/config/elasticsearch.yml|grep -v '#'  
    network.host: 0.0.0.0           ### listen on all interfaces  
    action.auto_create_index: .security,.monitoring*,.watches,.triggered_watches,.watcher-history*  
    #### enable the following modules as needed  
    xpack.security.enabled: true          #### enable user authentication  
    xpack.monitoring.enabled: true  
    xpack.graph.enabled: true  
    xpack.watcher.enabled: true  
    xpack.security.authc.realms:     #### authentication realm types: file, ldap, pki, Active Directory, etc.  
        file1:  
          type: file  
          order: 0
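
The xpack.* settings above only take effect once the X-Pack plugin is installed (section 8), and Elasticsearch must be restarted after any change to elasticsearch.yml. A minimal verification sketch, assuming the admin user created in section 9 already exists:

    # /etc/init.d/elasticsearch restart  
    # curl -u admin:kbsonlong 'http://localhost:9200/_cluster/health?pretty'       ### "status" should come back green or yellow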

4. Install Logstash

# cd /data/PRG/  
# tar zxvf /data/elk5.0/logstash-5.0.0.tar.gz  
# mv logstash-5.0.0 logstash  
# useradd logstash -s /sbin/nologin  
# chown logstash.logstash /data/PRG/logstash

Add an init script:

vim /etc/init.d/logstash

#!/bin/sh  
# Init script for logstash  
# Maintained by Elasticsearch  
# Generated by pleaserun.  
# Implemented based on LSB Core 3.1:  
#   * Sections: 20.2, 20.3  
#  
### BEGIN INIT INFO  
# Provides:          logstash  
# Required-Start:    $remote_fs $syslog  
# Required-Stop:     $remote_fs $syslog  
# Default-Start:     2 3 4 5  
# Default-Stop:      0 1 6  
# Short-Description:  
# Description:        Starts Logstash as a daemon.  
### END INIT INFO  

PATH=/sbin:/usr/sbin:/bin:/usr/bin:/data/PRG/logstash/bin  
export PATH  

if [ `id -u` -ne 0 ]; then  
   echo "You need root privileges to run this script"  
   exit 1  
fi  

name=logstash  

LS_USER=logstash  
LS_GROUP=logstash  
LS_HOME=/data/PRG/logstash  
LS_HEAP_SIZE="1g"  
LS_LOG_DIR=/data/PRG/logstash/logs  
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"  
pidfile="${LS_LOG_DIR}/$name.pid"  
LS_CONF_DIR=/data/PRG/logstash/conf.d  
LS_OPEN_FILES=16384  
LS_NICE=19  
KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0} #default value is zero to this variable but could be updated by user request  
LS_OPTS=""  


[ -r /etc/default/$name ] && . /etc/default/$name  
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name  

program=$LS_HOME/bin/logstash  
args=" -f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"  

quiet() {  
  "$@" > /dev/null 2>&1  
  return $?  
}  

start() {  

  LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"  
  HOME=${LS_HOME}  
  export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING LS_GC_LOG_FILE  

  # chown doesn't grab the suplimental groups when setting the user:group - so we have to do it for it.  
  # Boy, I hope we're root here.  
  SGROUPS=$(id -Gn "$LS_USER" | tr " " "," | sed 's/,$//'; echo '')  

  if [ ! -z $SGROUPS ]  
  then  
        EXTRA_GROUPS="--groups $SGROUPS"  
  fi  

  # set ulimit as (root, presumably) first, before we drop privileges  
  ulimit -n ${LS_OPEN_FILES}  

  # Run the program!  
  nice -n ${LS_NICE} chroot --userspec $LS_USER:$LS_GROUP $EXTRA_GROUPS / sh -c "  
    cd $LS_HOME  
    ulimit -n ${LS_OPEN_FILES}  
    $program $args > ${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &  

  # Generate the pidfile from here. If we instead made the forked process  
  # generate it there will be a race condition between the pidfile writing  
  # and a process possibly asking for status.  
  echo $! > $pidfile  

  echo "$name started."  
  return 0  
}  

stop() {  
  # Try a few times to kill TERM the program  
  if status ; then  
    pid=`cat "$pidfile"`  
    echo "Killing $name (pid $pid) with SIGTERM"  
    ps -ef |grep $pid |grep -v 'grep' |awk '{print $2}' | xargs kill -9  
    # Wait for it to exit.  
    for i in 1 2 3 4 5 6 7 8 9 ; do  
      echo "Waiting $name (pid $pid) to die..."  
      status || break  
      sleep 1  
    done  
    if status ; then  
      if [ $KILL_ON_STOP_TIMEOUT -eq 1 ] ; then  
        echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."  
        kill -KILL $pid  
        echo "$name killed with SIGKILL."  
      else  
        echo "$name stop failed; still running."  
        return 1 # stop timed out and not forced  
      fi  
    else  
      echo "$name stopped."  
    fi  
  fi  
}  

status() {  
  if [ -f "$pidfile" ] ; then  
    pid=`cat "$pidfile"`  
    if kill -0 $pid > /dev/null 2> /dev/null ; then  
      # process by this pid is running.  
      # It may not be our pid, but that's what you get with just pidfiles.  
      # TODO(sissel): Check if this process seems to be the same as the one we  
      # expect. It'd be nice to use flock here, but flock uses fork, not exec,  
      # so it makes it quite awkward to use in this case.  
      return 0  
    else  
      return 2 # program is dead but pid file exists  
    fi  
  else  
    return 3 # program is not running  
  fi  
}  

configtest() {  
  # Check if a config file exists  
  if [ ! "$(ls -A ${LS_CONF_DIR}/* 2> /dev/null)" ]; then  
    echo "There aren't any configuration files in ${LS_CONF_DIR}"  
    return 1  
  fi  

  HOME=${LS_HOME}  
  export PATH HOME  

  test_args="-t -f ${LS_CONF_DIR} ${LS_OPTS} "  
  $program ${test_args}  
  [ $? -eq 0 ] && return 0  
  # Program not configured  
  return 6  
}  

case "$1" in  
  start)  
    status  
    code=$?  
    if [ $code -eq 0 ]; then  
      echo "$name is already running"  
    else  
      start  
      code=$?  
    fi  
    exit $code  
    ;;  
  stop) stop ;;  
  force-stop) stop ;;  
  status)  
    status  
    code=$?  
    if [ $code -eq 0 ] ; then  
      echo "$name is running"  
    else  
      echo "$name is not running"  
    fi  
    exit $code  
    ;;  
  reload) stop && start ;;  
  restart)  
    stop && start  
    ;;  
  check)  
    configtest  
    exit $?  
    ;;  
  *)  
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|check}" >&2  
    exit 3  
  ;;  
esac  

exit $?

 

    # chmod +x /etc/init.d/logstash  
    # /etc/init.d/logstash start  
    # /etc/init.d/logstash status    
    logstash is running  
    
    # netstat -ntlp|grep 9600  
    tcp        0      0 :::9600                     :::*                        LISTEN      10141/java
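
Logstash 5.0 also exposes a monitoring API on port 9600, which gives a slightly stronger check than netstat; a minimal sketch, assuming the default API settings:

    # curl 'http://localhost:9600/?pretty'       ### should return a small JSON document that includes the Logstash version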

5. Configure Logstash

# cat /data/PRG/logstash/config/logstash.yml  |grep -v '#'

http.host: "0.0.0.0"       ### listen on all interfaces

Nginx log collection:

 

    # cat /data/PRG/logstash/conf.d/filter.conf  
    input {  
      beats {  
        port => 10200  
      }  
    }  
    
    filter {  
        grok {  
            match => {  
                "message" => "%{IPORHOST:remote_addr} , , %{IPORHOST:http_host} , \"%{WORD:http_verb}(?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code}, %{NUMBER:bytes_read} , %{QS:referrer} , %{QS:agent} ,\"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , - , - , - ,%{IPORHOST:server_ip} , %{BASE10NUM:request_duration}"  
            }  
            match => {  
                "message" => "%{IPORHOST:remote_addr} , , %{IPORHOST:http_host} , \"%{WORD:http_verb}(?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code}, %{NUMBER:bytes_read} , %{QUOTEDSTRING:referrer} , %{QS:agent} ,\"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" ,%{IPORHOST}:%{INT} , %{INT} , %{BASE10NUM} , %{IPORHOST} ,%{BASE10NUM:request_duration}"  
            }  
        }  
    }  
    output {  
      elasticsearch {  
        hosts => ["192.168.62.200:9200"]  
        index => "operation-%{+YYYY.MM.dd}"  
        document_type => "nginx2"  
        user => 'admin'              #### elasticsearch username, created with the X-Pack users tool  
        password => 'kbsonlong'      #### the corresponding elasticsearch password  
      }  
      stdout { codec => rubydebug }  
    }
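
Before restarting Logstash with this pipeline, validate it first; this is exactly what the init script's check action does. A manual equivalent, assuming the conf.d path used above:

    # /data/PRG/logstash/bin/logstash -t -f /data/PRG/logstash/conf.d/       ### -t only tests the configuration and exits; it should report the configuration as OK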

6. Install Kibana


    # cd /data/PRG/  
    # tar zxvf /data/elk5.0/kibana-5.0.0-linux-x86_64.tar.gz  
    # mv kibana-5.0.0-linux-x86_64 kibana  
    # useradd kibana -s /sbin/nologin  
    # chown kibana.kibana /data/PRG/kibana  

Add an init script:

# vim /etc/init.d/kibana

#!/bin/sh  
# Init script for kibana  
# Maintained by   
# Generated by pleaserun.  
# Implemented based on LSB Core 3.1:  
#   * Sections: 20.2, 20.3  
#  
### BEGIN INIT INFO  
# Provides:          kibana  
# Required-Start:    $remote_fs $syslog  
# Required-Stop:     $remote_fs $syslog  
# Default-Start:     2 3 4 5  
# Default-Stop:      0 1 6  
# Short-Description:   
# Description:       Kibana  
### END INIT INFO  

PATH=/sbin:/usr/sbin:/bin:/usr/bin  
export PATH  

KIBANA_HOME=/data/PRG/kibana  
name=kibana  
program=$KIBANA_HOME/bin/kibana  
args=''  
pidfile="$KIBANA_HOME/logs/$name.pid"  
LOG_HOME="$KIBANA_HOME/logs"  

[ -r /etc/default/$name ] && . /etc/default/$name  
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name  

[ -z "$nice" ] && nice=0  

trace() {  
  logger -t "/etc/init.d/kibana" "$@"  
}  

emit() {  
  trace "$@"  
  echo "$@"  
}  

start() {  

  # Ensure the log directory is setup correctly.  
  [ ! -d "$LOG_HOME" ] && mkdir "$LOG_HOME"  
  chmod 755 "$LOG_HOME"  


  # Setup any environmental stuff beforehand  


  # Run the program!  

  #chroot --userspec "$user":"$group" "$chroot" sh -c "  

  $program $args >> $LOG_HOME/kibana.stdout 2>> $LOG_HOME/kibana.stderr &  

  # Generate the pidfile from here. If we instead made the forked process  
  # generate it there will be a race condition between the pidfile writing  
  # and a process possibly asking for status.  
  echo $! > $pidfile  

  emit "$name started"  
  return 0  
}  

stop() {  
  # Try a few times to kill TERM the program  
  if status ; then  
    pid=$(cat "$pidfile")  
    echo "Killing $name (pid $pid) with SIGTERM"  
    ps -ef |grep $pid |grep -v 'grep' |awk '{print $2}' | xargs kill -9  
    # Wait for it to exit.  
    for i in 1 2 3 4 5 ; do  
      trace "Waiting $name (pid $pid) to die..."  
      status || break  
      sleep 1  
    done  
    if status ; then  
      if [ "$KILL_ON_STOP_TIMEOUT" -eq 1 ] ; then  
        trace "Timeout reached. Killing $name (pid $pid) with SIGKILL.  This may result in data loss."  
        kill -KILL $pid  
        emit "$name killed with SIGKILL."  
      else  
        emit "$name stop failed; still running."  
      fi  
    else  
      emit "$name stopped."  
    fi  
  fi  
}  

status() {  
  if [ -f "$pidfile" ] ; then  
    pid=$(cat "$pidfile")  
    if ps -p $pid > /dev/null 2> /dev/null ; then  
      # process by this pid is running.  
      # It may not be our pid, but that's what you get with just pidfiles.  
      # TODO(sissel): Check if this process seems to be the same as the one we  
      # expect. It'd be nice to use flock here, but flock uses fork, not exec,  
      # so it makes it quite awkward to use in this case.  
      return 0  
    else  
      return 2 # program is dead but pid file exists  
    fi  
  else  
    return 3 # program is not running  
  fi  
}  




case "$1" in  
  force-start|start|stop|status|restart)  
    trace "Attempting '$1' on kibana"  
    ;;  
esac  

case "$1" in  
  force-start)  
    PRESTART=no  
    exec "$0" start  
    ;;  
  start)  
    status  
    code=$?  
    if [ $code -eq 0 ]; then  
      emit "$name is already running"  
      exit $code  
    else  
      start  
      exit $?  
    fi  
    ;;  
  stop) stop ;;  
  status)  
    status  
    code=$?  
    if [ $code -eq 0 ] ; then  
      emit "$name is running"  
    else  
      emit "$name is not running"  
    fi  
    exit $code  
    ;;  
  restart)  

    stop && start  
    ;;  
  *)  
    echo "Usage: $SCRIPTNAME {start|force-start|stop|force-start|force-stop|status|restart}" >&2  
    exit 3  
  ;;  
esac  

exit $?

 

 

    # chmod +x /etc/init.d/kibana  
    # /etc/init.d/kibana start  
    # /etc/init.d/kibana status  
    # netstat -ntlp |grep 5601  
    tcp        0      0 0.0.0.0:5601                0.0.0.0:*                   LISTEN      13052/node  
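
Kibana also answers on its own status endpoint, which is a quick way to confirm it can reach Elasticsearch; a minimal check, assuming Kibana listens on localhost:5601 as shown above:

    # curl -s 'http://localhost:5601/api/status'       ### returns a JSON status document; the overall state should be green once Elasticsearch is reachable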

7. Configure Kibana

 

    # cat /data/PRG/kibana/config/kibana.yml |grep -v '#'  
    server.host: "0.0.0.0"  
    #### enable the following modules as needed  
    xpack.security.enabled: true  
    xpack.monitoring.enabled: true  
    xpack.graph.enabled: true  
    xpack.reporting.enabled: true  
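
Once xpack.security is enabled on the Elasticsearch side, Kibana also needs credentials for its own connection to the cluster. These keys are commented out by default, which is why they do not show up in the grep output above; a minimal sketch that simply reuses the admin account created in section 9 (a dedicated, less privileged user is preferable in production):

    elasticsearch.username: "admin"  
    elasticsearch.password: "kbsonlong"  

Restart Kibana after changing kibana.yml.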

8. Install the X-Pack plugin

# /data/PRG/kibana/bin/kibana-plugin install file:///root/x-pack-5.0.0.zip

# /data/PRG/elasticsearch/bin/elasticsearch-plugin install file:///root/x-pack-5.0.0.zip

For an offline X-Pack install, the users script has to be adjusted: by default the user configuration files are created under the /etc/elasticsearch/x-pack directory.

# vim /data/PRG/elasticsearch/bin/x-pack/users

Otherwise, creating a user fails with a message that /etc/elasticsearch/x-pack/users…tmp does not exist:

# mkdir /etc/elasticsearch/x-pack/

# chown -R elasticsearch.elasticsearch /etc/elasticsearch/x-pack/

9. Managing users with X-Pack

1. Add a user

 

    # cd /data/PRG/elasticsearch  
    # bin/x-pack/users useradd admin -p kbsonlong -r superuser  

2. List users

 

    # /data/PRG/elasticsearch/bin/x-pack/users list  
    admin          :superuser  
    test           : -                ### this user was created without the -r option, so it has no role  
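
A role can also be attached to an existing user afterwards instead of recreating it; a sketch using the roles sub-command of the same tool (-a adds roles, -r removes them):

    # /data/PRG/elasticsearch/bin/x-pack/users roles test -a superuser  
    # /data/PRG/elasticsearch/bin/x-pack/users list  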

3. Test user authentication

 

    # curl http://localhost:9200/_xpack/ -u admin:kbsonlong  
    {"build":{"hash":"7763f8e","date":"2016-10-26T04:51:59.202Z"},"license":{"uid":"06a82587-66ac-4d4a-90c4-857d9ca7f3bc","type":"trial","mode":"trial","status":"active","expiry_date_in_millis":1483753731066},"features":{"graph":{"description":"Graph Data Exploration for the Elastic Stack","available":true,"enabled":true},"monitoring":{"description":"Monitoring for the Elastic Stack","available":true,"enabled":true},"security":{"description":"Security for the Elastic Stack","available":true,"enabled":true},"watcher":{"description":"Alerting, Notification and Automation for the Elastic Stack","available":true,"enabled":true}},"tagline":"You know, for X"}  

4. Delete a user

 

    # /data/PRG/elasticsearch/bin/x-pack/users userdel test  
    # /data/PRG/elasticsearch/bin/x-pack/users list  
    admin          :superuser  
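
Passwords for file-realm users can be changed in place as well; a sketch with the passwd sub-command (the new password here is only a placeholder):

    # /data/PRG/elasticsearch/bin/x-pack/users passwd admin -p newpassword  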

10. Install Filebeat

 

    # cd /data/PRG  
    # tar zxvf /data/elk5.0/filebeat-5.0.0-linux-x86_64.tar.gz  
    # mv filebeat-5.0.0-linux-x86_64 filebeat  

Add an init script:

# vim /etc/init.d/filebeat

 

#!/bin/bash  
# filebeat          filebeat shipper  
# chkconfig: 2345 98 02  
### BEGIN INIT INFO  
# Provides:          filebeat  
# Required-Start:    $local_fs $network $syslog  
# Required-Stop:     $local_fs $network $syslog  
# Default-Start:     2 3 4 5  
# Default-Stop:      0 1 6  
# Short-Description: Sends log files to Logstash or directly to Elasticsearch.  
# Description:       filebeat is a shipper part of the Elastic Beats  
#                    family. Please see: https://www.elastic.co/products/beats  
### END INIT INFO  

PATH=/usr/bin:/sbin:/bin:/usr/sbin  
export PATH  

[ -f /etc/sysconfig/filebeat ] && . /etc/sysconfig/filebeat  
pidfile=${PIDFILE-/data/PRG/filebeat/filebeat.pid}  
agent=${PB_AGENT-/data/PRG/filebeat/filebeat}  
args="-c /data/PRG/filebeat/filebeat.yml"  
test_args="-e -configtest"  
wrapper="filebeat-god"  
wrapperopts="-r / -n -p $pidfile"  
RETVAL=0  

# Source function library.  
. /etc/rc.d/init.d/functions  

# Determine if we can use the -p option to daemon, killproc, and status.  
# RHEL < 5 can't.  
if status | grep -q -- '-p' 2>/dev/null; then  
    daemonopts="--pidfile $pidfile"  
    pidopts="-p $pidfile"  
fi  

test() {  
    $agent $args $test_args  
}  

start() {  
    echo -n $"Starting filebeat: "  
    test  
    if [ $? -ne 0 ]; then  
        echo  
        exit 1  
    fi  
    daemon $daemonopts $wrapper $wrapperopts -- $agent $args  
    RETVAL=$?  
    echo  
    return $RETVAL  
}  

stop() {  
    echo -n $"Stopping filebeat: "  
    killproc $pidopts $wrapper  
    RETVAL=$?  
    echo  
    [ $RETVAL = 0 ] && rm -f ${pidfile}  
}  

restart() {  
    test  
    if [ $? -ne 0 ]; then  
        return 1  
    fi  
    stop  
    start  
}  

rh_status() {  
    status $pidopts $wrapper  
    RETVAL=$?  
    return $RETVAL  
}  

rh_status_q() {  
    rh_status >/dev/null 2>&1  
}  

case "$1" in  
    start)  
        start  
    ;;  
    stop)  
        stop  
    ;;  
    restart)  
        restart  
    ;;  
    condrestart|try-restart)  
        rh_status_q || exit 0  
        restart  
    ;;  
    status)  
        rh_status  
    ;;  
    *)  
        echo $"Usage: $0 {start|stop|status|restart|condrestart}"  
        exit 1  
esac  
exit $RETVAL

Configure Filebeat


    # cat filebeat/filebeat.yml |grep -v '#'  
    filebeat.prospectors:  
    - input_type: log  
      paths:  
        - /tmp/nginx.log  
    output.logstash:  
      enabled: true  
      hosts: ["localhost:10200"]  
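
This is the same check the init script's test() function runs before starting the daemon; doing it by hand makes configuration errors easier to spot:

    # /data/PRG/filebeat/filebeat -e -configtest -c /data/PRG/filebeat/filebeat.yml       ### prints "Config OK" on success, as in the startup log below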

Start Filebeat


    # /etc/init.d/filebeat start  
    Starting filebeat: 2016/12/08 07:18:37.177631 beat.go:264: INFO Home path: [/data/PRG/filebeat] Config path: [/data/PRG/filebeat] Data path: [/data/PRG/filebeat/data] Logs path: [/data/PRG/filebeat/logs]  
    2016/12/08 07:18:37.177681 beat.go:174: INFO Setup Beat: filebeat; Version: 5.0.0  
    2016/12/08 07:18:37.177760 logstash.go:90: INFO Max Retries set to: 3  
    2016/12/08 07:18:37.177828 outputs.go:106: INFO Activated logstash as output plugin.  
    2016/12/08 07:18:37.177912 publish.go:291: INFO Publisher name: operation  
    2016/12/08 07:18:37.178158 async.go:63: INFO Flush Interval set to: 1s  
    2016/12/08 07:18:37.178170 async.go:64: INFO Max Bulk Size set to: 2048  
    Config OK  
                                                              [  OK  ]  
    # /etc/init.d/filebeat status  
    filebeat-god (pid  7365) is running...  
    # ps -ef |grep filebeat  
    root     7405     1  0 15:18 pts/1    00:00:00 filebeat-god -r / -n -p /data/PRG/filebeat/filebeat.pid -- /data/PRG/filebeat/filebeat -c /data/PRG/filebeat/filebeat.yml  
    root     7406  7405  0 15:18 pts/1    00:00:00 /data/PRG/filebeat/filebeat -c /data/PRG/filebeat/filebeat.yml  
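
For an end-to-end check, append a few lines to /tmp/nginx.log and confirm that the daily index defined in the Logstash output section shows up in Elasticsearch; a sketch reusing the admin credentials from section 9 and the Elasticsearch address from the Logstash output block:

    # curl -u admin:kbsonlong 'http://192.168.62.200:9200/_cat/indices/operation-*?v'  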

The installation source packages are attached, including x-pack, the beats, and so on.

Baidu Cloud: http://pan.baidu.com/s/1skT4zCx

