• 欢迎访问蜷缩的蜗牛博客 蜷缩的蜗牛
  • 微信搜索: 蜷缩的蜗牛 | 联系站长 kbsonlong@qq.com
  • 如果您觉得本站非常有看点,那么赶紧使用Ctrl+D 收藏吧

Elk实时日志分析平台5.0版本源码安装配置

Along 蜷缩的蜗牛 2年前 (2016-12-09) 37次浏览 已收录 0个评论

目录

一、 安装 JAVA. 1

二、 安装 elasticsearch. 2

三、 配置 elasticsearch. 2

四、 安装 logstash. 3

五、 配置 logstash. 3

六、 安装 kibana. 4

七、 配置 kibana. 5

八、 安装 x-pack 插件… 5

九、 x-pack 管理用户… 6

1、 添加用户… 6

2、 查看用户… 6

3、 测试用户登录… 6

4、 删除用户… 6

十、 安装 filebeat. 7

一、 安装 JAVA

# mkdir /usr/local/java/ -p

# cd /usr/local/java/

# tar zxvf /data/elk5.0/jdk-8u111-linux-x64.tar.gz

# cat >>/etc/profile<<'EOF'   （引用 EOF，避免 $PATH、$JAVA_HOME 在写入时被提前展开）

export JAVA_HOME=/usr/local/java/jdk1.8.0_111

export PATH=$PATH:$JAVA_HOME/bin

export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

EOF

# source /etc/profile

# java -version

java version "1.8.0_111"

Java(TM) SE Runtime Environment (build 1.8.0_111-b14)

Java HotSpot(TM) 64-Bit Server VM (build 25.111-b14, mixed mode)

二、 安装 elasticsearch

# mkdir /data/PRG/ -p

# cd /data/PRG/

# tar zxvf /data/elk5.0/elasticsearch-5.0.2.tar.gz

# mv elasticsearch-5.0.2 elasticsearch

# useradd elasticsearch -s /sbin/nologin

# chown elasticsearch.elasticsearch /data/PRG/elasticsearch/

添加启动脚本

vi /etc/init.d/elasticsearch

#!/bin/sh
#
# elasticsearch <summary>
#
# chkconfig:   2345 80 20
# description: Starts and stops a single elasticsearch instance on this system
#

### BEGIN INIT INFO
# Provides: Elasticsearch
# Required-Start: $network $named
# Required-Stop: $network $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: This service manages the elasticsearch daemon
# Description: Elasticsearch is a very scalable, schema-free and high-performance search solution supporting multi-tenancy and near realtime search.
### END INIT INFO

#
# init.d / servicectl compatibility (openSUSE)
#
if [ -f /etc/rc.status ]; then
    . /etc/rc.status
    rc_reset
fi

#
# Source function library.
#
if [ -f /etc/rc.d/init.d/functions ]; then
    . /etc/rc.d/init.d/functions
fi

# Sets the default values for elasticsearch variables used in this script.
# NOTE(review): the pasted script used typographic quotes; they are restored
# to plain ASCII double quotes here so the assignments parse as intended.
export JAVA_HOME=/usr/local/java/jdk1.8.0_111
ES_USER="elasticsearch"
ES_GROUP="elasticsearch"
name=elasticsearch
ES_HOME="/data/PRG/elasticsearch"
MAX_OPEN_FILES=65536
MAX_MAP_COUNT=262144
LOG_DIR="$ES_HOME/log/"
DATA_DIR="$ES_HOME/lib/"
CONF_DIR="$ES_HOME/config"
mkdir -p "$LOG_DIR"
chown -R "$ES_USER:$ES_GROUP" "$ES_HOME"

PID_DIR="$ES_HOME/log"

# Source the default env file (may override any of the variables above)
ES_ENV_FILE="/etc/sysconfig/elasticsearch"
if [ -f "$ES_ENV_FILE" ]; then
    . "$ES_ENV_FILE"
fi

# CONF_FILE setting was removed in elasticsearch 5.x
if [ ! -z "$CONF_FILE" ]; then
    echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
    exit 1
fi

exec="$ES_HOME/bin/elasticsearch"
prog="elasticsearch"
pidfile="$PID_DIR/${prog}.pid"

export ES_HEAP_SIZE
export ES_HEAP_NEWSIZE
export ES_DIRECT_SIZE
export ES_JAVA_OPTS
export ES_GC_LOG_FILE
export ES_STARTUP_SLEEP_TIME
export JAVA_HOME
export ES_INCLUDE

lockfile=$ES_HOME/log/$prog

# backwards compatibility for old config sysconfig files, pre 0.90.1
# (variables are quoted: an unquoted empty $USER made "[ -n $USER ]"
# a one-argument test that is always true)
if [ -n "$USER" ] && [ -z "$ES_USER" ] ; then
   ES_USER=$USER
fi

# Resolve the java binary into $JAVA: prefer $JAVA_HOME/bin/java, fall back
# to the first java on $PATH; abort the script if neither is executable.
checkJava() {
    if [ -x "$JAVA_HOME/bin/java" ]; then
        JAVA="$JAVA_HOME/bin/java"
    else
        JAVA=`which java`
    fi

    if [ ! -x "$JAVA" ]; then
        echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
        exit 1
    fi
}

# Start the elasticsearch daemon: validate memory/limit settings, prepare
# the PID dir/file, then launch via the RHEL 'daemon' helper as $ES_USER.
start() {
    checkJava
    [ -x "$exec" ] || exit 5
    if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then
        echo "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set"
        return 7
    fi
    if [ -n "$MAX_OPEN_FILES" ]; then
        ulimit -n $MAX_OPEN_FILES
    fi
    if [ -n "$MAX_LOCKED_MEMORY" ]; then
        ulimit -l $MAX_LOCKED_MEMORY
    fi
    if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
        sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
    fi
    export ES_GC_LOG_FILE

    # Ensure that the PID_DIR exists (it is cleaned at OS startup time)
    if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
        mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
    fi
    if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then
        touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile"
    fi

    cd "$ES_HOME"
    echo -n $"Starting $prog: "
    # long options need the ASCII double dash; the pasted en dash ("–user")
    # would have been passed through to daemon as a literal argument
    daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d
    retval=$?
    echo
    [ $retval -eq 0 ] && touch "$lockfile"
    return $retval
}

# Stop the daemon via killproc; -d 86400 allows a very long graceful wait.
stop() {
    echo -n $"Stopping $prog: "
    killproc -p "$pidfile" -d 86400 $prog
    retval=$?
    echo
    [ $retval -eq 0 ] && rm -f "$lockfile"
    return $retval
}

restart() {
    stop
    start
}

# elasticsearch has no reload support: reload and force_reload both restart.
reload() {
    restart
}

force_reload() {
    restart
}

rh_status() {
    # run checks to determine if the service is running or use generic status
    status -p "$pidfile" $prog
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}

# Dispatch on the requested action; unquoted "$1" in the pasted version used
# typographic quotes, which would have compared against literal “$1”.
case "$1" in
    start)
        rh_status_q && exit 0
        $1
        ;;
    stop)
        rh_status_q || exit 0
        $1
        ;;
    restart)
        $1
        ;;
    reload)
        rh_status_q || exit 7
        $1
        ;;
    force-reload)
        force_reload
        ;;
    status)
        rh_status
        ;;
    condrestart|try-restart)
        rh_status_q || exit 0
        restart
        ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
        exit 2
esac
exit $?

 

# chmod +x /etc/init.d/elasticsearch

# /etc/init.d/elasticsearch start

# /etc/init.d/elasticsearch status

elasticsearch (pid 20895) is running…

# netstat -ntlp |grep 9[2-3]00

tcp 0 0 :::9300 :::* LISTEN 20895/java

tcp 0 0 :::9200 :::* LISTEN 20895/java

三、 配置 elasticsearch

内存低于 2G,需要修改 jvm 配置

-Xms512m

-Xmx512m

# grep -v '#' /data/PRG/elasticsearch/config/elasticsearch.yml

network.host: 0.0.0.0    # listen on all interfaces
action.auto_create_index: .security,.monitoring*,.watches,.triggered_watches,.watcher-history*

# The x-pack modules below are optional; enable as needed.
xpack.security.enabled: true     # enable authentication
xpack.monitoring.enabled: true
xpack.graph.enabled: true
xpack.watcher.enabled: true

# Authentication realms; x-pack also supports ldap, pki, Active Directory, etc.
# (the nesting below was flattened in the original paste; YAML requires it)
xpack.security.authc.realms:
  file1:
    type: file
    order: 0

四、 安装 logstash

# cd /data/PRG/

# tar zxvf /data/elk5.0/logstash-5.0.2.tar.gz

# mv logstash-5.0.2 logstash

# useradd logstash -s /sbin/nologin

# chown logstash.logstash /data/PRG/logstash

添加启动脚本

vim /etc/init.d/logstash

#!/bin/sh
# Init script for logstash
# Maintained by Elasticsearch
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
#   * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides:          logstash
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description:
# Description:        Starts Logstash as a daemon.
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin:$LS_HOME/bin
export PATH

# The script drops privileges with chroot --userspec, so it must run as root.
if [ `id -u` -ne 0 ]; then
   echo "You need root privileges to run this script"
   exit 1
fi

name=logstash

export JAVA_HOME=/usr/local/java/jdk1.8.0_111
LS_USER=logstash
LS_GROUP=logstash
LS_HOME=/data/PRG/logstash
LS_HEAP_SIZE="1g"
LS_LOG_DIR=$LS_HOME/logs
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"
pidfile="${LS_LOG_DIR}/$name.pid"
LS_CONF_DIR=$LS_HOME/conf.d
LS_OPEN_FILES=16384
LS_NICE=19
# default is 0 (never escalate to SIGKILL); may be overridden by the user
KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0}
LS_OPTS=""

mkdir -p "$LS_LOG_DIR"
chown -R $LS_USER.$LS_GROUP "$LS_HOME"

# Allow distro-specific overrides of any of the variables above.
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

program=$LS_HOME/bin/logstash
args=" -f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"

# quiet CMD...: run CMD discarding stdout/stderr, propagating its status.
quiet() {
  "$@" > /dev/null 2>&1
  return $?
}

# Start logstash in the background as $LS_USER (via chroot --userspec) with
# niceness and ulimit applied, and record its PID in $pidfile.
start() {

  LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"
  HOME=${LS_HOME}
  export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING LS_GC_LOG_FILE

  # chroot doesn't grab the supplemental groups when setting the user:group -
  # so we have to do it for it. Boy, I hope we're root here.
  SGROUPS=$(id -Gn "$LS_USER" | tr " " "," | sed 's/,$//'; echo '')

  if [ ! -z "$SGROUPS" ]
  then
        EXTRA_GROUPS="--groups $SGROUPS"
  fi

  # set ulimit as (root, presumably) first, before we drop privileges
  ulimit -n ${LS_OPEN_FILES}

  # Run the program! (the pasted en dashes in "--userspec"/"--groups" are
  # restored to ASCII double dashes so chroot parses them as options)
  nice -n ${LS_NICE} chroot --userspec $LS_USER:$LS_GROUP $EXTRA_GROUPS / sh -c "
    cd $LS_HOME
    ulimit -n ${LS_OPEN_FILES}
    $program $args > ${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &

  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > $pidfile

  echo "$name started."
  return 0
}

# Stop logstash: send SIGTERM, wait up to 9s, then SIGKILL only when
# KILL_ON_STOP_TIMEOUT=1; otherwise report failure and return 1.
stop() {
  # Try a few times to kill TERM the program
  if status ; then
    pid=`cat "$pidfile"`
    echo "Killing $name (pid $pid) with SIGTERM"
    # BUG FIX: the pasted script ran "ps | grep $pid | ... kill -9", which
    # contradicts the SIGTERM message, can match unrelated processes, and
    # makes the SIGKILL fallback below dead code. Send SIGTERM to the pid.
    kill -TERM "$pid"
    # Wait for it to exit.
    for i in 1 2 3 4 5 6 7 8 9 ; do
      echo "Waiting $name (pid $pid) to die..."
      status || break
      sleep 1
    done
    if status ; then
      if [ $KILL_ON_STOP_TIMEOUT -eq 1 ] ; then
        echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
        kill -KILL $pid
        echo "$name killed with SIGKILL."
      else
        echo "$name stop failed; still running."
        return 1 # stop timed out and not forced
      fi
    else
      echo "$name stopped."
    fi
  fi
}

# status: return 0 when the pidfile names a live process, 2 when the pidfile
# exists but the process is dead, 3 when there is no pidfile at all.
status() {
  if [ -f "$pidfile" ] ; then
    pid=`cat "$pidfile"`
    if kill -0 $pid > /dev/null 2> /dev/null ; then
      # process by this pid is running.
      # It may not be our pid, but that's what you get with just pidfiles.
      return 0
    else
      return 2 # program is dead but pid file exists
    fi
  else
    return 3 # program is not running
  fi
}

# configtest: run "logstash -t" against $LS_CONF_DIR.
# Returns 1 when no config files exist, 6 when the configuration is invalid.
configtest() {
  # Check if a config file exists
  if [ ! "$(ls -A ${LS_CONF_DIR}/* 2> /dev/null)" ]; then
    echo "There aren't any configuration files in ${LS_CONF_DIR}"
    return 1
  fi

  HOME=${LS_HOME}
  export PATH HOME

  test_args="-t -f ${LS_CONF_DIR} ${LS_OPTS} "
  $program ${test_args}
  [ $? -eq 0 ] && return 0
  # Program not configured
  return 6
}

case "$1" in
  start)
    status
    code=$?
    if [ $code -eq 0 ]; then
      echo "$name is already running"
    else
      start
      code=$?
    fi
    exit $code
    ;;
  stop) stop ;;
  # NOTE(review): force_stop and reload are not defined anywhere in this
  # script; these two actions will fail with "command not found" - confirm
  # whether they should map to stop/restart or be removed from the case.
  force-stop) force_stop ;;
  status)
    status
    code=$?
    if [ $code -eq 0 ] ; then
      echo "$name is running"
    else
      echo "$name is not running"
    fi
    exit $code
    ;;
  reload) reload ;;
  restart)
    stop && start
    ;;
  check)
    configtest
    exit $?
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|check}" >&2
    exit 3
  ;;
esac

exit $?

 

# chmod +x /etc/init.d/logstash

# /etc/init.d/logstash start

# /etc/init.d/logstash status

logstash is running

# netstat -ntlp|grep 9600

tcp 0 0 :::9600 :::* LISTEN 10141/java

五、 配置 logstash

# cat /data/PRG/logstash/config/logstash.yml |grep -v ‘#’

http.host: “0.0.0.0” ###开启监听地址

nginx 日志收集

# cat /data/PRG/logstash/conf.d/filter.conf

input {
  beats {
    port => 10200
  }
}

filter {
  grok {
    # Pattern 1: lines where the upstream/proxy fields are logged as literal
    # "-" placeholders (the pasted en dashes are restored to ASCII hyphens).
    match => {
      message => "%{IPORHOST:remote_addr} , \[%{HTTPDATE:timestamp}\] , %{IPORHOST:http_host} , \"%{WORD:http_verb} (?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code} , %{NUMBER:bytes_read} , %{QS:referrer} , %{QS:agent} , \"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , - , - , - , %{IPORHOST:server_ip} , %{BASE10NUM:request_duration}"
    }
    # Pattern 2: lines with populated upstream address/status/duration fields.
    match => {
      message => "%{IPORHOST:remote_addr} , \[%{HTTPDATE:timestamp}\] , %{IPORHOST:http_host} , \"%{WORD:http_verb} (?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code} , %{NUMBER:bytes_read} , %{QUOTEDSTRING:referrer} , %{QS:agent} , \"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , %{IPORHOST}:%{INT} , %{INT} , %{BASE10NUM} , %{IPORHOST} , %{BASE10NUM:request_duration}"
    }
  }
}

output {
  elasticsearch {
    hosts => ["192.168.62.200:9200"]
    index => "operation-%{+YYYY.MM.dd}"
    document_type => "nginx2"
    user => 'admin'          #### elasticsearch username, created with the x-pack users tool
    password => 'kbsonlong'  #### elasticsearch password (the original comment mislabeled this as the username)
  }
  stdout { codec => rubydebug }
}

六、 安装 kibana

# cd /data/PRG/

# tar zxvf /data/elk5.0/kibana-5.0.2-linux-x86_64.tar.gz

# mv kibana-5.0.2-linux-x86_64 kibana

# useradd kibana -s /sbin/nologin

# chown kibana.kibana /data/PRG/kibana

添加启动脚本

# vim /etc/init.d/kibana

 

#!/bin/sh
# Init script for kibana
# Maintained by
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
#   * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides:          kibana
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description:
# Description:       Kibana
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH

KIBANA_HOME=/data/PRG/kibana
name=kibana
program=$KIBANA_HOME/bin/kibana
# kibana takes no extra arguments by default (the pasted script had a
# dangling typographic quote here instead of an empty string)
args=""
pidfile="$KIBANA_HOME/logs/$name.pid"
LOG_HOME="$KIBANA_HOME/logs"

# Allow distro-specific overrides.
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

[ -z "$nice" ] && nice=0

# trace MSG...: log to syslog only.
trace() {
  logger -t "/etc/init.d/kibana" "$@"
}

# emit MSG...: log to syslog and echo to stdout.
emit() {
  trace "$@"
  echo "$@"
}

# Start kibana in the background, logging stdout/stderr under $LOG_HOME,
# and record its PID in $pidfile.
start() {

  # Ensure the log directory is setup correctly.
  [ ! -d "$LOG_HOME" ] && mkdir "$LOG_HOME"
  chmod 755 "$LOG_HOME"

  # Run the program!
  $program $args >> $LOG_HOME/kibana.stdout 2>> $LOG_HOME/kibana.stderr &

  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > $pidfile

  emit "$name started"
  return 0
}

# Stop kibana: SIGTERM first, wait up to 5s, then SIGKILL only when
# KILL_ON_STOP_TIMEOUT=1.
# NOTE(review): KILL_ON_STOP_TIMEOUT is never assigned in this script; the
# numeric test below errors when it is unset - confirm it is provided via
# /etc/default/kibana or /etc/sysconfig/kibana.
stop() {
  # Try a few times to kill TERM the program
  if status ; then
    pid=$(cat "$pidfile")
    echo "Killing $name (pid $pid) with SIGTERM"
    # BUG FIX: the pasted script ran "ps | grep $pid | ... kill -9", which
    # contradicts the SIGTERM message, can match unrelated processes, and
    # makes the SIGKILL fallback below dead code. Send SIGTERM to the pid.
    kill -TERM "$pid"
    # Wait for it to exit.
    for i in 1 2 3 4 5 ; do
      trace "Waiting $name (pid $pid) to die..."
      status || break
      sleep 1
    done
    if status ; then
      if [ "$KILL_ON_STOP_TIMEOUT" -eq 1 ] ; then
        trace "Timeout reached. Killing $name (pid $pid) with SIGKILL.  This may result in data loss."
        kill -KILL $pid
        emit "$name killed with SIGKILL."
      else
        emit "$name stop failed; still running."
      fi
    else
      emit "$name stopped."
    fi
  fi
}

# status: return 0 when the pidfile names a live process, 2 when the pidfile
# exists but the process is dead, 3 when there is no pidfile at all.
status() {
  if [ -f "$pidfile" ] ; then
    pid=$(cat "$pidfile")
    if ps -p $pid > /dev/null 2> /dev/null ; then
      # process by this pid is running.
      # It may not be our pid, but that's what you get with just pidfiles.
      return 0
    else
      return 2 # program is dead but pid file exists
    fi
  else
    return 3 # program is not running
  fi
}

 

# Log every recognized action before dispatching it.
case "$1" in
  force-start|start|stop|status|restart)
    trace "Attempting '$1' on kibana"
    ;;
esac

case "$1" in
  force-start)
    PRESTART=no
    exec "$0" start
    ;;
  start)
    status
    code=$?
    if [ $code -eq 0 ]; then
      emit "$name is already running"
      exit $code
    else
      start
      exit $?
    fi
    ;;
  stop) stop ;;
  status)
    status
    code=$?
    if [ $code -eq 0 ] ; then
      emit "$name is running"
    else
      emit "$name is not running"
    fi
    exit $code
    ;;
  restart)
    stop && start
    ;;
  *)
    # NOTE(review): the usage string lists force-start twice and force-stop,
    # which has no case arm - confirm the intended action list.
    echo "Usage: $SCRIPTNAME {start|force-start|stop|force-start|force-stop|status|restart}" >&2
    exit 3
  ;;
esac

exit $?

 

# chmod +x /etc/init.d/kibana

# /etc/init.d/kibana start

# /etc/init.d/kibana status

# netstat -ntlp |grep 5601

tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 13052/node

七、 配置 kibana

# cat /data/PRG/kibana/config/kibana.yml |grep -v ‘#’

server.host: “0.0.0.0”

####以下模块视情况是否开启

xpack.security.enabled: true

xpack.monitoring.enabled: true

xpack.graph.enabled: true

xpack.reporting.enabled: true

八、 安装 x-pack插件

# /data/PRG/kibana/bin/kibana-plugin install file:///root/x-pack-5.0.0.zip

# /data/PRG/elasticsearch/bin/elasticsearch-plugin install file:///root/x-pack-5.0.0.zip

离线安装 x-pack 要修改用户脚本,默认创建用户配置文件在/etc/elasticsearch/x-pack 目录

# vim /data/PRG/elasticsearch/bin/x-pack/users

#!/bin/sh
# NOTE(review): this block is a verbatim re-paste of the kibana init script,
# although the surrounding text says it shows the edited x-pack "users"
# wrapper; the actual users script appears in the cat output further below.
#
# Init script for kibana
# Maintained by
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
#   * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides:          kibana
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description:
# Description:       Kibana
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH

KIBANA_HOME=/data/PRG/kibana
name=kibana
program=$KIBANA_HOME/bin/kibana
# kibana takes no extra arguments by default
args=""
pidfile="$KIBANA_HOME/logs/$name.pid"
LOG_HOME="$KIBANA_HOME/logs"

[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

[ -z "$nice" ] && nice=0

# trace MSG...: log to syslog only.
trace() {
  logger -t "/etc/init.d/kibana" "$@"
}

# emit MSG...: log to syslog and echo to stdout.
emit() {
  trace "$@"
  echo "$@"
}

# Start kibana in the background and record its PID.
start() {

  # Ensure the log directory is setup correctly.
  [ ! -d "$LOG_HOME" ] && mkdir "$LOG_HOME"
  chmod 755 "$LOG_HOME"

  # Run the program!
  $program $args >> $LOG_HOME/kibana.stdout 2>> $LOG_HOME/kibana.stderr &

  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > $pidfile

  emit "$name started"
  return 0
}

# Stop kibana: SIGTERM first, wait up to 5s, then SIGKILL only when
# KILL_ON_STOP_TIMEOUT=1 (never set in this script - see review note above).
stop() {
  # Try a few times to kill TERM the program
  if status ; then
    pid=$(cat "$pidfile")
    echo "Killing $name (pid $pid) with SIGTERM"
    # BUG FIX: replaced the pasted "ps | grep | kill -9" with an actual
    # SIGTERM so the graceful-shutdown wait below can work.
    kill -TERM "$pid"
    # Wait for it to exit.
    for i in 1 2 3 4 5 ; do
      trace "Waiting $name (pid $pid) to die..."
      status || break
      sleep 1
    done
    if status ; then
      if [ "$KILL_ON_STOP_TIMEOUT" -eq 1 ] ; then
        trace "Timeout reached. Killing $name (pid $pid) with SIGKILL.  This may result in data loss."
        kill -KILL $pid
        emit "$name killed with SIGKILL."
      else
        emit "$name stop failed; still running."
      fi
    else
      emit "$name stopped."
    fi
  fi
}

# status: 0 = running, 2 = dead but pidfile exists, 3 = not running.
status() {
  if [ -f "$pidfile" ] ; then
    pid=$(cat "$pidfile")
    if ps -p $pid > /dev/null 2> /dev/null ; then
      # process by this pid is running.
      # It may not be our pid, but that's what you get with just pidfiles.
      return 0
    else
      return 2 # program is dead but pid file exists
    fi
  else
    return 3 # program is not running
  fi
}

case "$1" in
  force-start|start|stop|status|restart)
    trace "Attempting '$1' on kibana"
    ;;
esac

case "$1" in
  force-start)
    PRESTART=no
    exec "$0" start
    ;;
  start)
    status
    code=$?
    if [ $code -eq 0 ]; then
      emit "$name is already running"
      exit $code
    else
      start
      exit $?
    fi
    ;;
  stop) stop ;;
  status)
    status
    code=$?
    if [ $code -eq 0 ] ; then
      emit "$name is running"
    else
      emit "$name is not running"
    fi
    exit $code
    ;;
  restart)
    stop && start
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|force-start|stop|force-start|force-stop|status|restart}" >&2
    exit 3
  ;;
esac

exit $?

[root@pcgames-monitor-vm234-158 elk]#
[root@pcgames-monitor-vm234-158 elk]# cat /data/PRG/elasticsearch/bin/x-pack/users
#!/bin/bash
# Wrapper that launches the x-pack UsersTool (file-realm user management)
# with the elasticsearch classpath. All typographic quotes from the web
# paste are restored to ASCII so the script parses.

SCRIPT="$0"

# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
  ls=`ls -ld "$SCRIPT"`
  # Drop everything prior to ->
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    SCRIPT="$link"
  else
    SCRIPT=`dirname "$SCRIPT"`/"$link"
  fi
done

# determine elasticsearch home (script lives in bin/x-pack/)
ES_HOME=`dirname "$SCRIPT"`/../..

# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`

# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
    # Locations (in order) to use when searching for an include file.
    for include in /usr/share/elasticsearch/elasticsearch.in.sh \
                   /usr/local/share/elasticsearch/elasticsearch.in.sh \
                   /opt/elasticsearch/elasticsearch.in.sh \
                   ~/.elasticsearch.in.sh \
                   "`dirname "$0"`"/../elasticsearch.in.sh \
                   "$ES_HOME/bin/elasticsearch.in.sh"; do
        if [ -r "$include" ]; then
            . "$include"
            break
        fi
    done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
    . "$ES_INCLUDE"
fi

if [ -x "$JAVA_HOME/bin/java" ]; then
    JAVA="$JAVA_HOME/bin/java"
else
    JAVA=`which java`
fi

if [ ! -x "$JAVA" ]; then
    echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
    exit 1
fi

if [ -z "$ES_CLASSPATH" ]; then
    echo "You must set the ES_CLASSPATH var" >&2
    exit 1
fi

if [ -z "$CONF_DIR" ]; then
    # Try to read package config files
    if [ -f "/etc/sysconfig/elasticsearch" ]; then
        CONF_DIR=/etc/elasticsearch

        . "/etc/sysconfig/elasticsearch"
    elif [ -f "/etc/default/elasticsearch" ]; then
        CONF_DIR=/etc/elasticsearch

       . "/etc/default/elasticsearch"
    fi
fi

export HOSTNAME=`hostname -s`

# include x-pack jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"

# don't let JAVA_TOOL_OPTIONS slip in (e.g. crazy agents in ubuntu)
# works around https://bugs.launchpad.net/ubuntu/+source/jayatana/+bug/1441487
if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then
    echo "Warning: Ignoring JAVA_TOOL_OPTIONS=$JAVA_TOOL_OPTIONS"
    echo "Please pass JVM parameters via ES_JAVA_OPTS instead"
    unset JAVA_TOOL_OPTIONS
fi

# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
    echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
    exit 1
fi

declare -a args=("$@")

# Point the tool at the package config dir when it exists; this is what
# makes it read/write users files under /etc/elasticsearch/x-pack.
if [ -e "$CONF_DIR" ]; then
  args=("${args[@]}" -Edefault.path.conf="$CONF_DIR")
fi

cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -cp "$ES_CLASSPATH" -Des.path.home="$ES_HOME" org.elasticsearch.xpack.security.authc.file.tool.UsersTool "${args[@]}"
status=$?
cd - > /dev/null
exit $status

 

否则在创建用户的时候提示/etc/elasticsearch/x-pack/users…tmp 不存在

# mkdir /etc/elasticsearch/x-pack/

# chown -R elasticsearch.elasticsearch /etc/elasticsearch/x-pack/

九、 x-pack 管理用户

1、 添加用户

# cd /data/PRG/elasticsearch

# bin/x-pack/users useradd admin -p kbsonlong -r superuser

2、 查看用户

# /data/PRG/elasticsearch/bin/x-pack/users list

admin : superuser

test : – ###创建用户时没有添加-r 参数,所以没有用户角色

3、 测试用户登录

# curl http://localhost:9200/_xpack/ --user admin:kbsonlong

{“build”:{“hash”:”7763f8e”,”date”:”2016-10-26T04:51:59.202Z”},”license”:{“uid”:”06a82587-66ac-4d4a-90c4-857d9ca7f3bc”,”type”:”trial”,”mode”:”trial”,”status”:”active”,”expiry_date_in_millis”:1483753731066},”features”:{“graph”:{“description”:”Graph Data Exploration for the Elastic Stack”,”available”:true,”enabled”:true},”monitoring”:{“description”:”Monitoring for the Elastic Stack”,”available”:true,”enabled”:true},”security”:{“description”:”Security for the Elastic Stack”,”available”:true,”enabled”:true},”watcher”:{“description”:”Alerting, Notification and Automation for the Elastic Stack”,”available”:true,”enabled”:true}},”tagline”:”You know, for X”}

4、 删除用户

# /data/PRG/elasticsearch/bin/x-pack/users userdel test

# /data/PRG/elasticsearch/bin/x-pack/users list

admin : superuser

十、 安装 filebeat

# cd /data/PRG

# tar zxvf /data/elk5.0/filebeat-5.0.0-linux-x86_64.tar.gz

# mv filebeat-5.0.0-linux-x86_64 filebeat

配置启动脚本

# vim /etc/init.d/filebeat

#!/bin/bash
#
# filebeat          filebeat shipper
#
# chkconfig: 2345 98 02
#

### BEGIN INIT INFO
# Provides:          filebeat
# Required-Start:    $local_fs $network $syslog
# Required-Stop:     $local_fs $network $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Sends log files to Logstash or directly to Elasticsearch.
# Description:       filebeat is a shipper part of the Elastic Beats
#                     family. Please see: https://www.elastic.co/products/beats
### END INIT INFO

PATH=/usr/bin:/sbin:/bin:/usr/sbin
export PATH

[ -f /etc/sysconfig/filebeat ] && . /etc/sysconfig/filebeat
pidfile=${PIDFILE-/data/PRG/filebeat/filebeat.pid}
agent=${PB_AGENT-/data/PRG/filebeat/filebeat}
args="-c /data/PRG/filebeat/filebeat.yml"
test_args="-e -configtest"
wrapper="filebeat-god"
wrapperopts="-r / -n -p $pidfile"
RETVAL=0

# Source function library.
. /etc/rc.d/init.d/functions

# Determine if we can use the -p option to daemon, killproc, and status.
# RHEL < 5 can't. (the pasted em dash before '-p' is restored to ASCII "--"
# so grep treats '-p' as a pattern, not an option)
if status | grep -q -- '-p' 2>/dev/null; then
    daemonopts="--pidfile $pidfile"
    pidopts="-p $pidfile"
fi

# Run the agent in config-test mode; non-zero status means the config is
# invalid. NOTE(review): this shadows the shell builtin "test" for the rest
# of the script - kept for interface compatibility.
test() {
    $agent $args $test_args
}

# Validate the config, then start the agent under the filebeat-god wrapper.
start() {
    echo -n $"Starting filebeat: "
    test
    if [ $? -ne 0 ]; then
        echo
        exit 1
    fi
    # the pasted em dash before $agent is restored to "--" so daemon stops
    # parsing its own options there
    daemon $daemonopts $wrapper $wrapperopts -- $agent $args
    RETVAL=$?
    echo
    return $RETVAL
}

# Stop the wrapper (which stops the agent) and clean up the pidfile.
stop() {
    echo -n $"Stopping filebeat: "
    killproc $pidopts $wrapper
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${pidfile}
}

# Refuse to restart when the configuration fails the self-test.
restart() {
    test
    if [ $? -ne 0 ]; then
        return 1
    fi
    stop
    start
}

rh_status() {
    status $pidopts $wrapper
    RETVAL=$?
    return $RETVAL
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}

case "$1" in
    start)
        start
    ;;
    stop)
        stop
    ;;
    restart)
        restart
    ;;
    condrestart|try-restart)
        rh_status_q || exit 0
        restart
    ;;
    status)
        rh_status
    ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|condrestart}"
        exit 1
esac

exit $RETVAL

 

# cat filebeat/filebeat.yml |grep -v ‘#’

filebeat.prospectors:
# one log prospector tailing the nginx access log
- input_type: log
  paths:
    - /tmp/nginx.log

# ship events to the local logstash beats input
output.logstash:
  enabled: true
  hosts: ["localhost:10200"]

启动 filebeat

# /etc/init.d/filebeat start   （前文脚本安装为 /etc/init.d/filebeat，而非 filebeat5）

Starting filebeat: 2016/12/08 07:18:37.177631 beat.go:264: INFO Home path: [/data/PRG/filebeat] Config path: [/data/PRG/filebeat] Data path: [/data/PRG/filebeat/data] Logs path: [/data/PRG/filebeat/logs]

2016/12/08 07:18:37.177681 beat.go:174: INFO Setup Beat: filebeat; Version: 5.0.0

2016/12/08 07:18:37.177760 logstash.go:90: INFO Max Retries set to: 3

2016/12/08 07:18:37.177828 outputs.go:106: INFO Activated logstash as output plugin.

2016/12/08 07:18:37.177912 publish.go:291: INFO Publisher name: operation

2016/12/08 07:18:37.178158 async.go:63: INFO Flush Interval set to: 1s

2016/12/08 07:18:37.178170 async.go:64: INFO Max Bulk Size set to: 2048

Config OK

[ OK ]

# /etc/init.d/filebeat status

filebeat-god (pid 7365) is running…

# ps -ef |grep filebeat

root 7405 1 0 15:18 pts/1 00:00:00 filebeat-god -r / -n -p /data/PRG/filebeat/filebeat.pid — /data/PRG/filebeat/filebeat -c /data/PRG/filebeat/filebeat.yml

root 7406 7405 0 15:18 pts/1 00:00:00 /data/PRG/filebeat/filebeat -c /data/PRG/filebeat/filebeat.yml

# netstat -ntlp | egrep ‘9200|9300|5601|9600|10200’

tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 14339/node

tcp 0 0 :::9300 :::* LISTEN 14205/java

tcp 0 0 :::10200 :::* LISTEN 14309/java

tcp 0 0 ::ffff:127.0.0.1:9600 :::* LISTEN 14309/java

tcp 0 0 :::9200 :::* LISTEN 14205/java


蜷缩的蜗牛 , 版权所有丨如未注明 , 均为原创丨 转载请注明Elk 实时日志分析平台 5.0 版本源码安装配置
喜欢 (0)
[]
分享 (0)

您必须 登录 才能发表评论!