From Phobos D'thorga, 3 Months ago, written in Bash.
This paste will perish in 7 Months.
Embed
  1. /etc/logstash/conf.d/02-beats-input.conf
  2. -----------------------------------------
  3. input {
  4.   beats {
  5.     port => 5044
  6.   }
  7. }
  8. -----------------------------------------
  9.  
  10.  
  11. /etc/logstash/conf.d/30-elasticsearch-output.conf
  12. -----------------------------------------
  13. output {
  14.   elasticsearch {
  15.     hosts => ["https://<redacted>:9200"]
  16.     # NOTE: the `protocol` option was removed from this plugin in v2.0;
  17.     # the scheme is taken from the hosts URL, so `protocol => "https"`
  18.     # would prevent the pipeline from loading on current Logstash.
  19.     compression_level => 0
  20.     index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  21.     user => "<redacted>"
  22.     password => "<redacted>"
  23.   }
  24. }
  21. -----------------------------------------
  22.  
  23.  
  24. /etc/logstash/logstash.yml
  25. -----------------------------------------
  26. # Settings file in YAML
  27. #
  28. # Settings can be specified either in hierarchical form, e.g.:
  29. #
  30. #   pipeline:
  31. #     batch:
  32. #       size: 125
  33. #       delay: 5
  34. #
  35. # Or as flat keys:
  36. #
  37. #   pipeline.batch.size: 125
  38. #   pipeline.batch.delay: 5
  39. #
  40. #================================ Inputs ======================================
  41.  
  42. # input.beats.port: 5044
  43. # input.beats.codec: json
  44. # input.beats.type: "SuricataIDPS"
  45. #
  46. # ------------  Node identity ------------
  47. #
  48. # Use a descriptive name for the node:
  49. #
  50. node.name: metrics.gekkofyre.io
  51. #
  52. # If omitted the node name will default to the machine's host name
  53. #
  54. # ------------ Data path ------------------
  55. #
  56. # Which directory should be used by logstash and its plugins
  57. # for any persistent needs. Defaults to LOGSTASH_HOME/data
  58. #
  59. path.data: /var/lib/logstash
  60. #
  61. # ------------ Pipeline Settings --------------
  62. #
  63. # The ID of the pipeline.
  64. #
  65. pipeline.id: gk-logstash
  66. #
  67. # Set the number of workers that will, in parallel, execute the filters+outputs
  68. # stage of the pipeline.
  69. #
  70. # This defaults to the number of the host's CPU cores.
  71. #
  72. pipeline.workers: 4
  73. #
  74. # How many events to retrieve from inputs before sending to filters+outputs
  75. #
  76. pipeline.batch.size: 1024
  77. #
  78. # How long to wait in milliseconds while polling for the next event
  79. # before dispatching an undersized batch to filters+outputs
  80. #
  81. pipeline.batch.delay: 50
  82. #
  83. # Force Logstash to exit during shutdown even if there are still inflight
  84. # events in memory. By default, logstash will refuse to quit until all
  85. # received events have been pushed to the outputs.
  86. #
  87. # WARNING: enabling this can lead to data loss during shutdown
  88. #
  89. # pipeline.unsafe_shutdown: false
  90. #
  91. # ------------ Pipeline Configuration Settings --------------
  92. #
  93. # Where to fetch the pipeline configuration for the main pipeline
  94. #
  95. # path.config:
  96. #
  97. # Pipeline configuration string for the main pipeline
  98. #
  99. # config.string:
  100. #
  101. # At startup, test if the configuration is valid and exit (dry run)
  102. #
  103. # config.test_and_exit: false
  104. #
  105. # Periodically check if the configuration has changed and reload the pipeline
  106. # This can also be triggered manually through the SIGHUP signal
  107. #
  108. # config.reload.automatic: false
  109. #
  110. # How often to check if the pipeline configuration has changed (in seconds)
  111. #
  112. # config.reload.interval: 3s
  113. #
  114. # Show fully compiled configuration as debug log message
  115. # NOTE: --log.level must be 'debug'
  116. #
  117. # config.debug: false
  118. #
  119. # When enabled, process escaped characters such as \n and \" in strings in the
  120. # pipeline configuration files.
  121. #
  122. # config.support_escapes: false
  123. #
  124. # ------------ Module Settings ---------------
  125. # Define modules here.  Module definitions must be defined as an array.
  126. # The simple way to see this is to prepend each `name` with a `-`, and keep
  127. # all associated variables under the `name` they are associated with, and
  128. # above the next, like this:
  129. #
  130. # modules:
  131. #   - name: MODULE_NAME
  132. #     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
  133. #     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
  134. #     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
  135. #     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
  136. #
  137. # Module variable names must be in the format of
  138. #
  139. # var.PLUGIN_TYPE.PLUGIN_NAME.KEY
  140. #
  141. # modules:
  142. #
  143. #
  144. # ------------ Cloud Settings ---------------
  145. # Define Elastic Cloud settings here.
  146. # Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
  147. # and it may have a label prefix e.g. staging:dXMtZ...
  148. # This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
  149. # cloud.id: <identifier>
  150. #
  151. # Format of cloud.auth is: <user>:<pass>
  152. # This is optional
  153. # If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
  154. # If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
  155. # cloud.auth: elastic:<password>
  156. #
  157. # ------------ Queuing Settings --------------
  158. #
  159. # Internal queuing model, "memory" for legacy in-memory based queuing and
  160. # "persisted" for disk-based acked queueing. Default is memory
  161. #
  162. queue.type: persisted
  163. #
  164. # If using queue.type: persisted, the directory path where the data files will be stored.
  165. # Default is path.data/queue
  166. #
  167. path.queue: /var/lib/logstash/queue
  168. #
  169. # If using queue.type: persisted, the page data files size. The queue data consists of
  170. # append-only data files separated into pages. Default is 64mb
  171. #
  172. queue.page_capacity: 128mb
  173. #
  174. # If using queue.type: persisted, the maximum number of unread events in the queue.
  175. # Default is 0 (unlimited)
  176. #
  177. # queue.max_events: 0
  178. #
  179. # If using queue.type: persisted, the total capacity of the queue in number of bytes.
  180. # If you would like more unacked events to be buffered in Logstash, you can increase the
  181. # capacity using this setting. Please make sure your disk drive has capacity greater than
  182. # the size specified here. If both max_bytes and max_events are specified, Logstash will pick
  183. # whichever criteria is reached first
  184. # Default is 1024mb or 1gb
  185. #
  186. queue.max_bytes: 16384mb
  187. #
  188. # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
  189. # Default is 1024, 0 for unlimited
  190. #
  191. queue.checkpoint.acks: 8192
  192. #
  193. # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
  194. # Default is 1024, 0 for unlimited
  195. #
  196. queue.checkpoint.writes: 8192
  197. #
  198. # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
  199. # Default is 1000, 0 for no periodic checkpoint.
  200. #
  201. queue.checkpoint.interval: 1000
  202. #
  203. # ------------ Dead-Letter Queue Settings --------------
  204. # Flag to turn on dead-letter queue.
  205. #
  206. dead_letter_queue.enable: true
  207.  
  208. # If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
  209. # will be dropped if they would increase the size of the dead letter queue beyond this setting.
  210. # Default is 1024mb
  211. dead_letter_queue.max_bytes: 8192mb
  212.  
  213. # If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
  214. # Default is path.data/dead_letter_queue
  215. #
  216. path.dead_letter_queue: /var/lib/logstash/dead_letter_queue
  217. #
  218. # ------------ Metrics Settings --------------
  219. #
  220. # Bind address for the metrics REST endpoint
  221. #
  222. # http.host: "127.0.0.1"
  223. #
  224. # Bind port for the metrics REST endpoint, this option also accepts a range
  225. # (9600-9700) and logstash will pick up the first available port.
  226. #
  227. # http.port: 9600-9700
  228. #
  229. # ------------ Debugging Settings --------------
  230. #
  231. # Options for log.level:
  232. #   * fatal
  233. #   * error
  234. #   * warn
  235. #   * info (default)
  236. #   * debug
  237. #   * trace
  238. #
  239. log.level: warn
  240. path.logs: /var/log/logstash
  241. #
  242. # ------------ Other Settings --------------
  243. #
  244. # Where to find custom plugins
  245. # path.plugins: []
  246. #
  247. # Flag to output log lines of each pipeline in its separate log file. Each log filename contains the pipeline.name
  248. # Default is false
  249. pipeline.separate_logs: false
  250. #
  251. # ------------ X-Pack Settings (not applicable for OSS build)--------------
  252. #
  253. # X-Pack Monitoring
  254. # https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
  255. # https://www.elastic.co/guide/en/logstash/7.1/configuring-logstash.html
  256. #
  257. xpack.monitoring.enabled: true
  258. xpack.monitoring.elasticsearch.username: "<redacted>"
  259. xpack.monitoring.elasticsearch.password: "<redacted>"
  260. xpack.monitoring.elasticsearch.hosts: ["https://<redacted>:9200"]
  261. # xpack.monitoring.elasticsearch.ssl.certificate_authority: [ "/path/to/ca.crt" ]
  262. # xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
  263. # xpack.monitoring.elasticsearch.ssl.truststore.password: password
  264. # xpack.monitoring.elasticsearch.ssl.keystore.path: "/etc/elasticsearch/elasticsearch.keystore"
  265. # xpack.monitoring.elasticsearch.ssl.keystore.password: password
  266. # xpack.monitoring.elasticsearch.ssl.verification_mode: certificate
  267. xpack.monitoring.elasticsearch.sniffing: true
  268. xpack.monitoring.collection.interval: 10s
  269. xpack.monitoring.collection.pipeline.details.enabled: true
  270. #
  271. # X-Pack Management
  272. # https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
  273. xpack.management.enabled: false
  274. # xpack.management.pipeline.id: ["main", "nginx_logs"]
  275. # xpack.management.elasticsearch.username: "<redacted>"
  276. # xpack.management.elasticsearch.password: "<redacted>"
  277. # xpack.management.elasticsearch.hosts: ["https://<redacted>:9200"]
  278. # xpack.management.elasticsearch.ssl.certificate_authority: [ "/path/to/ca.crt" ]
  279. # xpack.management.elasticsearch.ssl.truststore.path: /path/to/file
  280. # xpack.management.elasticsearch.ssl.truststore.password: password
  281. # xpack.management.elasticsearch.ssl.keystore.path: "/etc/elasticsearch/elasticsearch.keystore"
  282. # xpack.management.elasticsearch.ssl.keystore.password: password
  283. # xpack.management.elasticsearch.ssl.verification_mode: certificate
  284. # xpack.management.elasticsearch.sniffing: true
  285. # xpack.management.logstash.poll_interval: 5s
  286. -----------------------------------------