ELK Series (3): Collecting Data with Redis and Logstash


Kibana

Kibana provides the web-based visualization layer for Elasticsearch, presenting the indexed data to users.

Installing Kibana

[root@kibana ~]# ls
anaconda-ks.cfg  GeoLite2-City.tar.gz  kibana-7.1.1-x86_64.rpm
[root@kibana ~]# yum -y install kibana-7.1.1-x86_64.rpm

Edit the Kibana configuration file

[root@kibana ~]# cat /etc/kibana/kibana.yml | grep -v "#" | grep -v "^$"
server.port: 5601                    # listening port
server.host: "192.168.122.10"        # listening address
elasticsearch.hosts: ["http://192.168.122.20:9200","http://192.168.122.30:9200"]   # Elasticsearch hosts
i18n.locale: "zh-CN"                 # UI language (7.x ships a built-in Chinese locale, no extra localization patch needed)

Start the service

# systemctl start kibana ; systemctl enable kibana ; systemctl status kibana
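Kibana can take a little while to come up. As a quick sanity check before opening the browser (a minimal sketch; host and port follow the kibana.yml above):

# ss -tnlp | grep 5601
# curl -s http://192.168.122.10:5601/api/status | head -c 200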

Open the test page

URL: http://kibana_ip:5601


Collecting data with Filebeat

# cat /etc/filebeat/filebeat.yml | grep -v "#" | grep -v "^$"
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
output.logstash:
  hosts: ["192.168.122.40:5044"]
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# cat /etc/logstash/conf.d/remote_filebeat_nginx.conf
input {
  beats {
    port => 5044
    host => "0.0.0.0"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.122.20:9200","192.168.122.30:9200"]
    index => "remote_filebeat_nginx_app-%{+YYYY.MM.dd}"
  }
  stdout {
    codec => rubydebug
  }
}
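With Filebeat shipping to Logstash and Logstash writing to Elasticsearch, start both ends and confirm the daily index is created (a minimal sketch; the index name follows the output section above):

# systemctl start filebeat ; systemctl enable filebeat
# /usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/conf.d/remote_filebeat_nginx.conf
# curl -s 'http://192.168.122.20:9200/_cat/indices?v' | grep remote_filebeat_nginx_app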

Collecting data with Redis and Logstash combined


# Logstash to Redis
# Configure this on the server whose nginx logs are being collected

# cat /etc/logstash/conf.d/logstash_to_redis.conf
input {
  file {
    path => "/var/log/nginx/access.log"
    start_position => "beginning"
  }
}
filter {
}
output {
  redis {
    host => ["192.168.122.40:6379"]
    password => "123456"
    db => "0"
    data_type => "list"
    key => "logstashtoredis"
  }
}
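Logstash can validate the file before the pipeline is started (-t is Logstash's config-test-and-exit flag):

# /usr/share/logstash/bin/logstash --path.settings /etc/logstash -t -f /etc/logstash/conf.d/logstash_to_redis.conf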

# Configure the Redis server

# cat /etc/redis.conf
bind 0.0.0.0            # line 61
requirepass 123456      # line 480

# Start the service
# systemctl enable redis ; systemctl start redis

# Verify
# redis-cli -h 192.168.122.40 -a 123456
> keys *

# Configure Logstash to pull from Redis and push to Elasticsearch

# cat /etc/logstash/conf.d/logstash_from_redis.conf
input {
  redis {
    host => "192.168.122.40"
    port => 6379
    password => "123456"
    db => "0"
    data_type => "list"
    key => "logstashtoredis"
  }
}
filter {
}
output {
  elasticsearch {
    hosts => ["http://192.168.122.20:9200","http://192.168.122.30:9200"]
    index => "logstashtoredis-redisfromlogstash-%{+YYYY.MM.dd}"
  }
  stdout { codec => rubydebug }
}
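To run this pipeline in the foreground and watch the rubydebug output (the same invocation style the article uses for the later examples):

# /usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/conf.d/logstash_from_redis.conf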

Collecting data with Filebeat and Redis combined

# Collect the logs with Filebeat on the web server

# cat /etc/filebeat/filebeat.yml
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all
# the supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

#=========================== Filebeat inputs =============================

filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.

- type: log

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/nginx/access.log
    #- c:\programdata\elasticsearch\logs\*

  # Exclude/include lines and files (lists of regular expressions).
  #exclude_lines: ['^DBG']
  #include_lines: ['^ERR', '^WARN']
  #exclude_files: ['.gz$']

  # Optional additional fields for filtering.
  #fields:
  #  level: debug
  #  review: 1

  ### Multiline options (for log messages spanning multiple lines,
  ### e.g. Java stack traces)
  #multiline.pattern: ^\[
  #multiline.negate: false
  #multiline.match: after

#============================= Filebeat modules ===============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false

#================================ General =====================================

#name:
#tags: ["service-X", "web-tier"]
#fields:
#  env: staging

#============================== Dashboards ====================================

#setup.dashboards.enabled: false
#setup.dashboards.url:

#============================== Kibana ========================================

setup.kibana:
  # Kibana Host
  #host: "localhost:5601"
  # Kibana Space ID
  #space.id:

#============================= Elastic Cloud ==================================

#cloud.id:
#cloud.auth:

#================================ Outputs =====================================

#-------------------------- Elasticsearch output ------------------------------
#output.elasticsearch:
  # Array of hosts to connect to.
  #hosts: ["localhost:9200"]
  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["192.168.122.40:5044"]
  # Optional SSL. By default it is off.
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  #ssl.certificate: "/etc/pki/client/cert.pem"
  #ssl.key: "/etc/pki/client/cert.key"

#----------------------------- Redis output -----------------------------------
######## this is the key part ########
output.redis:
  # The Redis hosts
  hosts: ["192.168.122.40"]
  password: "123456"
  key: "filebeattoredis"
  db: 0
  datatype: list

#================================ Processors ==================================

processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~

#================================ Logging =====================================

#logging.level: debug
#logging.selectors: ["*"]

#============================== Xpack Monitoring ==============================

#xpack.monitoring.enabled: false
#xpack.monitoring.elasticsearch:

#================================= Migration ==================================

# Enable 6.7 migration aliases
#migration.6_to_7.enabled: true
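Before starting Filebeat, its configuration and the connection to Redis can be checked up front (filebeat test config is a standard subcommand; the redis-cli ping assumes the password set above):

# filebeat test config -c /etc/filebeat/filebeat.yml
# redis-cli -h 192.168.122.40 -a 123456 ping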

# Configure the Redis server

# cat /etc/redis.conf
bind 0.0.0.0            # line 61
requirepass 123456      # line 480

# Start the service
# systemctl enable redis ; systemctl start redis

# Verify
# redis-cli -h 192.168.122.40 -a 123456
> keys *

# Configure this on the Logstash server

[root@logstash ~]# cat /etc/logstash/conf.d/logstash_from_redis.conf
input {
  redis {
    host => "192.168.122.40"
    port => 6379
    password => "123456"
    db => "0"
    data_type => "list"
    key => "filebeattoredis"
  }
}
filter {
}
output {
  elasticsearch {
    hosts => ["http://192.168.122.20:9200","http://192.168.122.30:9200"]
    index => "filebeattoredis-logstashfromredis-%{+YYYY.MM.dd}"
  }
  stdout { codec => rubydebug }
}

Production case

Analyzing nginx logs with the ELK stack

# yum -y install nginx

# Format the nginx log as JSON
# In the http {} block of /etc/nginx/nginx.conf:

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    log_format json '{ "@timestamp": "$time_iso8601", '
                    '"remote_addr": "$remote_addr", '
                    '"remote_user": "$remote_user", '
                    '"body_bytes_sent": "$body_bytes_sent", '
                    '"request_time": "$request_time", '
                    '"status": "$status", '
                    '"request_uri": "$request_uri", '
                    '"request_method": "$request_method", '
                    '"http_referer": "$http_referer", '
                    '"http_x_forwarded_for": "$http_x_forwarded_for", '
                    '"http_user_agent": "$http_user_agent"}';

    access_log /var/log/nginx/access.log json;

# Generate a few requests and check the log
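A quick way to apply and exercise the new format (standard nginx commands; assumes nginx serves a default page on localhost):

# nginx -t && systemctl reload nginx
# curl -s http://localhost/ > /dev/null
# tail -n 1 /var/log/nginx/access.log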

# On the Redis server
# cat /etc/redis.conf | grep -v "#" | grep -v "^$"
bind 192.168.122.40       # line 61
requirepass 123456        # line 480

# systemctl start redis ; systemctl enable redis ; systemctl status redis

# Verify (once Filebeat, configured below, has shipped some events)
# redis-cli -h 192.168.122.40 -a 123456
192.168.122.40:6379> keys *
1) "filebeattoredis"
192.168.122.40:6379> llen filebeattoredis
(integer) 5

# Use Filebeat to collect the logs and store them in Redis
# yum -y install filebeat

# cat /etc/filebeat/filebeat.yml | grep -v "#" | grep -v "^$"
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
output.redis:
  hosts: ["192.168.122.40"]
  password: "123456"
  key: "filebeattoredis"
  db: 0
  datatype: list
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~

# systemctl start filebeat ; systemctl enable filebeat ; systemctl status filebeat
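If nothing shows up in the Redis list, Filebeat's own log is the first place to look (standard systemd journal command):

# journalctl -u filebeat --no-pager | tail -n 20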

# Configure Logstash to feed the data into Elasticsearch

# cat /etc/logstash/conf.d/logstash_from_redis.conf
input {
  redis {
    host => "192.168.122.40"
    port => 6379
    password => "123456"
    db => "0"
    data_type => "list"
    key => "filebeattoredis"
  }
}
filter {
}
output {
  elasticsearch {
    hosts => ["http://192.168.122.20:9200","http://192.168.122.30:9200"]
    index => "filebeattoredis-logstashfromredis-%{+YYYY.MM.dd}"
  }
  stdout { codec => rubydebug }
}

# /usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/conf.d/logstash_from_redis.conf
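Once Logstash is consuming from Redis, the new daily index should appear in Elasticsearch (a quick check; the index name follows the output section above):

# curl -s 'http://192.168.122.20:9200/_cat/indices?v' | grep filebeattoredis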


Working with the data in Kibana


Filtering nginx logs with Logstash

# nginx log: switch the access log back to the standard "main" format
# (plain text, so the grok filter below has something to parse)
# In the http {} block:

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

# Clear the old log
# echo > /var/log/nginx/access.log

# Filebeat configuration

[root@app ~]# cat /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  fields:
    app: www
    type: nginx
  fields_under_root: true
output.redis:
  hosts: ["192.168.122.40"]
  password: "123456"
  key: "filebeat"
  db: 0
  datatype: list
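Because fields_under_root is true, app and type become top-level fields on every event, which is what the Logstash filter below keys on (if [app] == "www", if [type] == "nginx"). Roughly, an event pushed to Redis carries (illustrative sketch, trimmed to the relevant fields):

{ "@timestamp": "...", "message": "<raw nginx access log line>", "app": "www", "type": "nginx", ... }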

# On the Redis side
192.168.122.40:6379> keys *
1) "filebeat"
192.168.122.40:6379> llen filebeat
(integer) 1

# Use the Logstash grok plugin to parse the nginx log

# cat /etc/logstash/conf.d/logstash_nginx_format.conf
input {
  redis {
    host => "192.168.122.40"
    port => 6379
    password => "123456"
    db => "0"
    data_type => "list"
    key => "filebeat"
  }
}
filter {
  if [app] == "www" {
    if [type] == "nginx" {
      grok {
        match => {
          "message" => "%{IPV4:remote_addr} - (%{USERNAME:remote_user}|-) \[%{HTTPDATE:time_local}\] \"%{WORD:request_method} %{URIPATHPARAM:request_uri} HTTP/%{NUMBER:http_protocol}\" %{NUMBER:http_status} %{NUMBER:body_bytes_sent} \"%{GREEDYDATA:http_referer}\" \"%{GREEDYDATA:http_user_agent}\" \"(%{IPV4:http_x_forwarded_for}|-)\""
        }
        overwrite => ["message"]
      }
      geoip {
        source => "remote_addr"
        target => "geoip"
        database => "/opt/GeoLite2-City.mmdb"
        add_field => ["[geoip][coordinates]", "%{[geoip][longitude]}"]
        add_field => ["[geoip][coordinates]", "%{[geoip][latitude]}"]
      }
      date {
        locale => "en"
        match => ["time_local", "dd/MMM/yyyy:HH:mm:ss Z"]
      }
      mutate {
        convert => ["[geoip][coordinates]", "float"]
      }
    }
  }
}
output {
  elasticsearch {
    hosts => ["http://192.168.122.20:9200","http://192.168.122.30:9200"]
    index => "logstash-nginx-log-format-%{type}-%{+YYYY.MM.dd}"
  }
  stdout { codec => rubydebug }
}

# /usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/conf.d/logstash_nginx_format.conf
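The geoip filter points at /opt/GeoLite2-City.mmdb; that database has to be unpacked from the GeoLite2-City.tar.gz seen earlier and placed on the Logstash host (a minimal sketch; the directory name inside the tarball varies by release date):

# tar -xzf GeoLite2-City.tar.gz
# cp GeoLite2-City_*/GeoLite2-City.mmdb /opt/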

Building the nginx log views in Kibana



Original post: https://blog.51cto.com/14625831/2549446
