flume kafka


1. Checking consumption status

kafka-run-class kafka.tools.ConsumerOffsetChecker --group groupname --topic topicname --zookeeper ip1:2181,ip2:2181,ip3:2181

Meaning of each column:

Group    - consumer group name
Topic    - topic name
Pid      - partition id
Offset   - number of messages the group has already consumed from the partition
logSize  - total number of messages currently in the partition
Lag      - number of messages not yet consumed (Lag = logSize - Offset)
Owner    - the consumer instance that currently owns the partition
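A hypothetical output row (all values are purely illustrative) would look like:

Group           Topic                          Pid  Offset     logSize    Lag     Owner
groupname       topicname                      0    9500       10000      500     groupname_consumer-host-1

Here 10000 - 9500 = 500 messages remain unconsumed.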

2. Flume 1.6 multi-source, multi-channel, multi-sink configuration

# Names of the sources
agent.sources = pc app
# Names of the channels; naming them after their type is recommended
agent.channels = FilePc  FileApp
# Names of the sinks; naming them after their destination is recommended
agent.sinks = hdfsSinkPc  hdfsSinkApp

# Channels used by each source
agent.sources.pc.channels = FilePc
agent.sources.app.channels = FileApp

# Channel used by each sink; note the property here is 'channel' (singular)
agent.sinks.hdfsSinkPc.channel = FilePc
agent.sinks.hdfsSinkApp.channel = FileApp

#-------- Kafka source configuration -----------------
# Source type
agent.sources.pc.type = org.apache.flume.source.kafka.KafkaSource
agent.sources.app.type = org.apache.flume.source.kafka.KafkaSource
# ZooKeeper quorum used by Kafka
#
# Note: this must be the ZooKeeper ensemble that Kafka itself uses. zookeeperConnect
# applies to the Flume 1.6 Kafka source (old consumer API); later Flume versions use
# kafka.bootstrap.servers instead, so mind the version.
#
agent.sources.pc.zookeeperConnect = 10.15.201.197:2181,10.15.201.198:2181,10.15.201.199:2181
agent.sources.app.zookeeperConnect = 10.15.201.197:2181,10.15.201.198:2181,10.15.201.199:2181
# Kafka topics to consume
agent.sources.pc.topic = suggest_pc_action
agent.sources.app.topic = suggest_app_action

# Consumer group ids
agent.sources.pc.groupId = flume57_pc
agent.sources.app.groupId = flume57_app
# Consumer timeout. All other Kafka consumer options can be set the same way;
# properties prefixed with kafka. are passed through to the Kafka consumer.
agent.sources.pc.kafka.consumer.timeout.ms = 1000
agent.sources.app.kafka.consumer.timeout.ms = 1000



#------- Channel configuration -------------------------
# Channel type
agent.channels.FilePc.type = File
agent.channels.FileApp.type = File
# Maximum number of events the channel can hold
agent.channels.FilePc.capacity=10000
agent.channels.FileApp.capacity=10000
# Transaction capacity (should be at least as large as the batch size of any source or sink using the channel)
agent.channels.FilePc.transactionCapacity=1000
agent.channels.FileApp.transactionCapacity=1000

#--------- HDFS sink configuration ------------------
agent.sinks.hdfsSinkPc.type = hdfs
agent.sinks.hdfsSinkApp.type = hdfs
# Output paths; the %Y%m%d%H escapes create one subdirectory per hour
agent.sinks.hdfsSinkPc.hdfs.path = hdfs://namenodeip:8020/xcardata/log/kafka_log/pc_action_log/%Y%m%d%H
agent.sinks.hdfsSinkPc.hdfs.writeFormat = Text
agent.sinks.hdfsSinkPc.hdfs.fileType = DataStream
# Default 1024. Roll the temporary file into a final file once it reaches this size in bytes.
agent.sinks.hdfsSinkPc.hdfs.rollSize = 10240000
# Roll the temporary file into a final file after this many events; 0 disables rolling by event count.
agent.sinks.hdfsSinkPc.hdfs.rollCount = 0
# Default 30. Roll the temporary file into the final file after this many seconds; 0 disables time-based rolling.
agent.sinks.hdfsSinkPc.hdfs.rollInterval = 60


agent.sinks.hdfsSinkApp.hdfs.path = hdfs://namenodeip:8020/xcardata/log/kafka_log/app_action_log/%Y%m%d%H
agent.sinks.hdfsSinkApp.hdfs.writeFormat = Text
agent.sinks.hdfsSinkApp.hdfs.fileType = DataStream
agent.sinks.hdfsSinkApp.hdfs.rollSize = 10240000
agent.sinks.hdfsSinkApp.hdfs.rollCount = 0
agent.sinks.hdfsSinkApp.hdfs.rollInterval = 60

# File name prefix and suffix
agent.sinks.hdfsSinkPc.hdfs.filePrefix=pc_action_log
agent.sinks.hdfsSinkPc.hdfs.fileSuffix=.json
agent.sinks.hdfsSinkApp.hdfs.filePrefix=app_action_log
agent.sinks.hdfsSinkApp.hdfs.fileSuffix=.json
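
# Optional addition, not part of the original configuration: the %Y%m%d%H escapes in
# hdfs.path require a timestamp header on each event. If the events arriving at the
# sink carry no timestamp header, the sink can be told to fall back to local time:
#agent.sinks.hdfsSinkPc.hdfs.useLocalTimeStamp = true
#agent.sinks.hdfsSinkApp.hdfs.useLocalTimeStamp = true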

#In-use prefix/suffix mark files that are still being written, so they are not picked up before being closed
#agent.sinks.hdfsSink.hdfs.inUsePrefix=_
#agent.sinks.hdfsSink.hdfs.inUseSuffix=

#Custom interceptor (commented-out example; see the Java sketch at the end of section 4)
#agent.sources.kafkaSource.interceptors=i1
#agent.sources.kafkaSource.interceptors.i1.type=com.hadoop.flume.FormatInterceptor$Builder
#agent.channels = memoryChannel

#File channel directories: events are persisted to local disk
#Optional dual checkpointing (keeps a backup copy of the checkpoint)
#agent.channels.FilePc.useDualCheckpoints=true
#agent.channels.FileApp.useDualCheckpoints=true
#agent.channels.FilePc.backupCheckpointDir=/opt/apache-flume-1.6.0-bin/checkpoint2
agent.channels.FilePc.checkpointDir=/opt/apache-flume-1.6.0-bin/checkpoint_Pc
agent.channels.FileApp.checkpointDir=/opt/apache-flume-1.6.0-bin/checkpoint_App
agent.channels.FilePc.dataDirs=/opt/apache-flume-1.6.0-bin/dataDir_Pc
agent.channels.FileApp.dataDirs=/opt/apache-flume-1.6.0-bin/dataDir_App
#Maximum size of a single data file; default 2146435071 bytes (~2 GB)
agent.channels.FilePc.maxFileSize=2146435071
agent.channels.FileApp.maxFileSize=2146435071

3. Starting the Flume agent

# Enable HTTP metrics reporting
/opt/apache-flume-1.6.0-bin/bin/flume-ng agent -c conf -f conf/flume-conf.properties -n agent -Dflume.monitoring.type=http -Dflume.monitoring.port=5653 -Dflume.root.logger=INFO,console  >>flume_to_hdfs.log 2>&1 &
The metrics are then served over HTTP at ip:port/metrics (port 5653 in the command above).
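The endpoint can be queried with any HTTP client, for example (the agent host name is a placeholder):

curl http://<agent-host>:5653/metrics

which returns JSON such as the sample below.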

{
    "SOURCE.app": {
        "KafkaCommitTimer": "528",
        "KafkaEventGetTimer": "161840",
        "EventReceivedCount": "8272",  //source端成功收到的event数量
        "AppendBatchAcceptedCount": "0", //追加到channel中的批数量
        "Type": "SOURCE",
        "EventAcceptedCount": "8272", //成功放入channel的event数量
        "AppendReceivedCount": "0",//source追加目前收到的数量
        "StartTime": "1511322862896",
        "AppendAcceptedCount": "0", //放入channel的event数量
        "OpenConnectionCount": "0",//打开的连接数
        "AppendBatchReceivedCount": "0",//source端刚刚追加的批数量
        "StopTime": "0" //组件停止时间
    },
    "SOURCE.pc": {
        "KafkaCommitTimer": "2117",
        "KafkaEventGetTimer": "536256",
        "EventReceivedCount": "115891",
        "AppendBatchAcceptedCount": "0",
        "Type": "SOURCE",
        "EventAcceptedCount": "115891",
        "AppendReceivedCount": "0",
        "StartTime": "1511322862896",
        "AppendAcceptedCount": "0",
        "OpenConnectionCount": "0",
        "AppendBatchReceivedCount": "0",
        "StopTime": "0"
    },
    "CHANNEL.FilePc": {
        "ChannelCapacity": "10000", //通道容量
        "ChannelFillPercentage": "1.91", //通道使用比例
        "Type": "CHANNEL",
        "ChannelSize": "191", //目前在channel中的event数量
        "EventTakeSuccessCount": "115700", //从channel中成功取走的event数量
        "EventTakeAttemptCount": "116707",//尝试从channel中取走event的次数
        "StartTime": "1511322861765",
        "EventPutSuccessCount": "115891",//成功放入channel的event数量
        "EventPutAttemptCount": "115891",//尝试放入将event放入channel的次数
        "StopTime": "0"
    },
    "SINK.hdfsSinkPc": {
        "ConnectionCreatedCount": "10",//创建连接数
        "ConnectionClosedCount": "9",//关闭连接数量
        "Type": "SINK",
        "BatchCompleteCount": "830",//完成的批数量
        "BatchEmptyCount": "523",//批量取空的数量
        "EventDrainAttemptCount": "115700",//尝试提交的event数量
        "StartTime": "1511322861782",
        "EventDrainSuccessCount": "115700",//成功发送event的数量
        "BatchUnderflowCount": "484",//正处于批量处理的batch数
        "StopTime": "0",
        "ConnectionFailedCount": "0" //连接失败数
    },
    "SINK.hdfsSinkApp": {
        "ConnectionCreatedCount": "10",
        "ConnectionClosedCount": "9",
        "Type": "SINK",
        "BatchCompleteCount": "13",
        "BatchEmptyCount": "288",
        "EventDrainAttemptCount": "8389",
        "StartTime": "1511322861782",
        "EventDrainSuccessCount": "8389",
        "BatchUnderflowCount": "112",
        "StopTime": "0",
        "ConnectionFailedCount": "0"
    },
    "CHANNEL.FileApp": {
        "ChannelCapacity": "10000",
        "ChannelFillPercentage": "0.0",
        "Type": "CHANNEL",
        "ChannelSize": "0",
        "EventTakeSuccessCount": "8389",
        "EventTakeAttemptCount": "8789",
        "StartTime": "1511322861494",
        "EventPutSuccessCount": "8272",
        "EventPutAttemptCount": "8272",
        "StopTime": "0"
    }
}
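
As a rough illustration of how these figures could be consumed programmatically, here is a minimal Java sketch that polls the endpoint and flags channels that are filling up. It is an assumption, not part of the original setup: the host name agenthost is a placeholder, the port matches -Dflume.monitoring.port above, and it uses a crude regex instead of a JSON parser.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Polls the Flume HTTP metrics endpoint and prints every ChannelFillPercentage value. */
public class FlumeMetricsCheck {
    public static void main(String[] args) throws Exception {
        // agenthost is a placeholder; 5653 matches -Dflume.monitoring.port above
        URL url = new URL("http://agenthost:5653/metrics");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setConnectTimeout(5000);
        conn.setReadTimeout(5000);

        // Read the whole JSON response into a string
        StringBuilder json = new StringBuilder();
        try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
            String line;
            while ((line = in.readLine()) != null) {
                json.append(line);
            }
        }

        // Crude extraction; a real monitor would use a JSON library instead
        Matcher m = Pattern.compile("\"ChannelFillPercentage\"\\s*:\\s*\"([0-9.]+)\"").matcher(json);
        while (m.find()) {
            double fill = Double.parseDouble(m.group(1));
            System.out.println("channel fill = " + fill + "%");
            if (fill > 80.0) {
                System.out.println("WARNING: channel is filling up, sink may be too slow");
            }
        }
    }
}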

Ganglia reporting

        -Dflume.monitoring.type=ganglia  # by default Flume reports metrics in Ganglia 3.1 format
        -Dflume.monitoring.pollFrequency=45 # reporting interval in seconds
        -Dflume.monitoring.isGanglia3=true # report in the older Ganglia 3 format instead
        -Dflume.root.logger=INFO,console
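
The Ganglia reporter also needs the list of gmond hosts via flume.monitoring.hosts. A sketch of a full startup command under that assumption (the gmond host names are placeholders; 8649 is the default gmond port):

/opt/apache-flume-1.6.0-bin/bin/flume-ng agent -c conf -f conf/flume-conf.properties -n agent \
    -Dflume.monitoring.type=ganglia \
    -Dflume.monitoring.hosts=gmond-host1:8649,gmond-host2:8649 \
    -Dflume.monitoring.pollFrequency=45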

4. Flume event data structure
1. An event is the basic unit of data that Flume handles. It consists of zero or more headers plus a body. The headers are a string map and the body is a byte array; the body is the payload that is actually delivered, while header data is not normally written out by the sink.
2. Headers are key/value pairs that can drive routing decisions or carry other structured information (such as the event timestamp or the hostname of the originating server). Think of them like HTTP headers: a way to pass extra information alongside the body.
3. Flume lets users modify events and add headers, so that logs can be routed based on their content. The mechanisms for this are interceptors and (channel) selectors, as sketched below.
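
To make point 3 concrete (and to show what a class like the commented-out com.hadoop.flume.FormatInterceptor$Builder in section 2 generally looks like), here is a minimal custom-interceptor sketch in Java. It is an illustration under assumptions, not the original FormatInterceptor: the package, class name and the log_type header are hypothetical. It tags every event with a header that a multiplexing channel selector could then route on.

package com.example.flume;  // hypothetical package, not the original com.hadoop.flume

import java.util.List;
import java.util.Map;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

/**
 * Minimal illustrative interceptor: adds a "log_type" header based on the body,
 * so a multiplexing selector could route pc/app traffic to different channels.
 */
public class TagInterceptor implements Interceptor {

    @Override
    public void initialize() {
        // no state to set up
    }

    @Override
    public Event intercept(Event event) {
        Map<String, String> headers = event.getHeaders();
        String body = new String(event.getBody());
        // hypothetical rule: bodies containing "app" are tagged as app logs
        headers.put("log_type", body.contains("app") ? "app" : "pc");
        return event;
    }

    @Override
    public List<Event> intercept(List<Event> events) {
        for (Event e : events) {
            intercept(e);
        }
        return events;
    }

    @Override
    public void close() {
        // nothing to release
    }

    /** Flume creates interceptors through a Builder, hence the $Builder suffix in the config. */
    public static class Builder implements Interceptor.Builder {
        @Override
        public Interceptor build() {
            return new TagInterceptor();
        }

        @Override
        public void configure(Context context) {
            // read interceptor properties here if needed
        }
    }
}

It would be wired in just like the commented-out example in section 2, e.g. agent.sources.pc.interceptors = i1 and agent.sources.pc.interceptors.i1.type = com.example.flume.TagInterceptor$Builder.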

