21 Storm Examples

demo1 wc accumulation example

  • pom.xml
  • Note: if you hit java.lang.ClassNotFoundException: backtype.storm.topology.IRichSpout at runtime, comment out the provided scope (local mode needs storm-core on the classpath).
  • Note: if you hit java.lang.NoSuchMethodError: com.lmax.disruptor.RingBuffer.… at runtime, pin the LMAX disruptor version as shown below.
    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-core</artifactId>
      <version>0.9.3</version>
      <!-- comment out "provided" when running in local mode; keep it for cluster submission -->
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>com.lmax</groupId>
      <artifactId>disruptor</artifactId>
      <version>3.2.0</version>
    </dependency>
  • WcSpout.java
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import java.util.List;
import java.util.Map;

public class WcSpout extends BaseRichSpout {
    private Map map;
    private TopologyContext topologyContext;
    private SpoutOutputCollector spoutOutputCollector;
    int i = 0;

    @Override
    public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
        this.map = map;
        this.topologyContext = topologyContext;
        this.spoutOutputCollector = spoutOutputCollector;
    }

    //called repeatedly by Storm; keeps emitting a tuple stream to the bolt
    @Override
    public void nextTuple() {
        i++;
        List<Object> tuple = new Values(i);
        spoutOutputCollector.emit(tuple);
        System.err.println("spout-------------"+i);
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        //declare the field name of the emitted tuples
        outputFieldsDeclarer.declare(new Fields("num"));
    }
}
  • WcBolt.java
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;
import java.util.Map;

public class WcBolt extends BaseRichBolt {
    private Map map;
    private TopologyContext topologyContext;
    private OutputCollector outputCollector;
    int sum = 0;

    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.map = map;
        this.topologyContext = topologyContext;
        this.outputCollector = outputCollector;
    }
    //called for every tuple received from the spout
    @Override
    public void execute(Tuple tuple) {
        //read the value from the tuple
        Integer i = tuple.getIntegerByField("num");
        //accumulate the running sum
        sum += i;
        System.out.println("sum-----------" + sum);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {

    }
}
  • Test class Test.java
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;

public class Test {
    public static void main(String[] args) {
        TopologyBuilder tb = new TopologyBuilder();
        //wire the spout and the bolt together
        tb.setSpout("wcSpout", new WcSpout());
        tb.setBolt("wcBolt", new WcBolt()).shuffleGrouping("wcSpout");

        //submit to a local in-process cluster
        LocalCluster lc = new LocalCluster();
        lc.submitTopology("wc", new Config(), tb.createTopology());
    }
}
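  • Optionally, the local cluster can be torn down instead of running until the JVM is killed. A minimal sketch, appended at the end of main (the 10-second sleep is an arbitrary choice; Utils is backtype.storm.utils.Utils):
        //let the topology run for a while, then stop it and shut the local cluster down
        Utils.sleep(10000);
        lc.killTopology("wc");
        lc.shutdown();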
  • Test output

demo2 WordCount word-count example

  • WcSpout.java
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

import java.util.List;
import java.util.Map;
import java.util.Random;

public class WcSpout extends BaseRichSpout {

    private SpoutOutputCollector spoutOutputCollector;
    String[] text = {
            "hello welcome hadoop",
            "hello hadoop",
            "welcome storm"
    };
    Random r = new Random();
    @Override
    public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
        this.spoutOutputCollector = spoutOutputCollector;
    }

    //emit data to the bolt
    @Override
    public void nextTuple() {
        //simulate an endless stream of text by picking a random line
        List line = new Values(text[r.nextInt(text.length)]);
        spoutOutputCollector.emit(line);
        System.err.println("spout---------"+line);
        Utils.sleep(1000);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("line"));
    }
}
  • WcSplitBolt.java — splits each line into words
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

import java.util.List;
import java.util.Map;

public class WcSplitBolt extends BaseRichBolt {
    private OutputCollector outputCollector;

    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.outputCollector = outputCollector;
    }

    //read a line, split it into words, emit each word
    @Override
    public void execute(Tuple tuple) {
        String line = tuple.getStringByField("line");
        String[] worlds = line.split(" ");
        for(String world : worlds){
            List w = new Values(world);
            outputCollector.emit(w);
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("w"));
    }
}
  • WcCountBolt.java
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

import java.util.HashMap;
import java.util.Map;

public class WcCountBolt extends BaseRichBolt {
    private Map<String, Integer> map = new HashMap<>();

    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
    }

    @Override
    public void execute(Tuple tuple) {
        String w = tuple.getStringByField("w");
        Integer count = 1;
        if (map.containsKey(w)) {
            count = map.get(w) + 1;
        }
        map.put(w, count);
        System.out.println(w + "-------" + count);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
    }
}
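  • On Java 8+ the count update in execute() can be written more compactly with Map.getOrDefault; an equivalent sketch of just that step:
        //same logic as above: start at 0 if the word is unseen, then add 1
        int count = map.getOrDefault(w, 0) + 1;
        map.put(w, count);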
  • Test class Test.java
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

public class Test {
    public static void main(String[] args) {
        TopologyBuilder tb = new TopologyBuilder();
        tb.setSpout("WcSpout", new WcSpout());
        //tb.setBolt("WcSplitBolt", new WcSplitBolt()).shuffleGrouping("WcSpout");
        //tb.setBolt("WcCountBolt", new WcCountBolt()).shuffleGrouping("WcSplitBolt");
        //with multiple tasks the grouping has to change: fieldsGrouping below sends tuples with the same key to the same bolt task
        tb.setBolt("WcSplitBolt", new WcSplitBolt(), 3).shuffleGrouping("WcSpout");
        tb.setBolt("WcCountBolt", new WcCountBolt(),3).fieldsGrouping("WcSplitBolt",new Fields("w"));

        Config conf = new Config();
        conf.setDebug(false);
        conf.setMessageTimeoutSecs(30);
        if (args.length > 0) {
            try {
                StormSubmitter.submitTopology(args[0], conf, tb.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            }
        } else {
            LocalCluster lc = new LocalCluster();
            lc.submitTopology("wc", conf, tb.createTopology());
        }

    }
}
  • Test output

Stream groupings (data distribution strategies)

  • 1. Shuffle Grouping
    • Random grouping: tuples in the stream are distributed randomly, so every bolt task receives roughly the same number of tuples.
    • In effect it round-robins, giving an even distribution.
  • 2. Fields Grouping
    • Group by field: for example, grouping on the "user-id" field means tuples with the same "user-id" always go to the same task of the bolt, while tuples with different "user-id" values may go to different tasks.
  • 3. All Grouping
    • Broadcast: every tuple is sent to all tasks of the bolt.
  • 4. Global Grouping
    • Global grouping: the entire stream goes to the single task with the lowest task id.
  • 5. None Grouping
    • No grouping: the stream does not care how it is distributed. Currently this behaves the same as shuffle grouping; the one difference is that Storm may place a none-grouped bolt in the same thread as the bolt or spout it subscribes to (a possible future optimization).
  • 6. Direct Grouping
    • Directed grouping: the producer of a tuple decides which task of the consumer processes it. This can only be used on streams declared as direct streams, and such tuples must be emitted with emitDirect. The consumer can obtain the task ids that process its tuples from the TopologyContext (the OutputCollector.emit method also returns the ids of the tasks the tuple was sent to).
  • 7. Local or shuffle grouping
    • Local-or-shuffle grouping: if the target bolt has one or more tasks in the same worker process as the source task, tuples are shuffled only among those in-process tasks; otherwise it behaves like an ordinary shuffle grouping.
  • 8. customGrouping
    • Custom grouping, analogous to implementing your own partitioner in MapReduce; see the sketch after this list.
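  • Item 8 has no example in the demos below; as a rough sketch (the class name ModHashGrouping and the hash-mod routing are illustrative, not from the original post), a custom grouping in the backtype.storm 0.9.x API looks roughly like this:
import java.util.Collections;
import java.util.List;

import backtype.storm.generated.GlobalStreamId;
import backtype.storm.grouping.CustomStreamGrouping;
import backtype.storm.task.WorkerTopologyContext;

//route each tuple by the hash of its first field, like a MapReduce partitioner
public class ModHashGrouping implements CustomStreamGrouping {
    private List<Integer> targetTasks;

    @Override
    public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
        this.targetTasks = targetTasks;   //task ids of the downstream bolt
    }

    @Override
    public List<Integer> chooseTasks(int taskId, List<Object> values) {
        int idx = Math.abs(values.get(0).hashCode()) % targetTasks.size();
        return Collections.singletonList(targetTasks.get(idx));
    }
}
    It would be wired up with builder.setBolt("bolt", new MyBolt(), 2).customGrouping("spout", new ModHashGrouping());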

demo3 stream grouping example

  • Data source: track.log
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 12:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 09:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 12:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:52
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 12:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 08:40:53
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 09:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 10:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 12:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 08:40:50
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:50
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 09:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:52
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:51
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:53
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:50
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 12:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:50
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:52
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:51
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 10:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:50
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:52
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:50
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 10:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 12:40:49
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 10:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:52
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 12:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:51
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:51
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:52
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:53
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:50
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:50
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:50
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
  • MySpout.java
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.util.Map;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.IRichSpout;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

public class MySpout implements IRichSpout {
    private static final long serialVersionUID = 1L;
    FileInputStream fis;
    InputStreamReader isr;
    BufferedReader br;
    SpoutOutputCollector collector = null;
    String str = null;

    @Override
    public void nextTuple() {
        try {
            while ((str = this.br.readLine()) != null) {
                // filtering could be applied here; emit the raw line plus its session_id
                collector.emit(new Values(str, str.split("\t")[1]));
            }
        } catch (Exception e) {
            // don't swallow read errors silently
            e.printStackTrace();
        }

    }

    @Override
    public void close() {
        try {
            br.close();
            isr.close();
            fis.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        try {
            this.collector = collector;
            this.fis = new FileInputStream("track.log");
            this.isr = new InputStreamReader(fis, "UTF-8");
            this.br = new BufferedReader(isr);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("log", "session_id"));
    }
    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }
    @Override
    public void ack(Object msgId) {
        System.out.println("spout ack:" + msgId.toString());
    }
    @Override
    public void activate() { }
    @Override
    public void deactivate() { }
    @Override
    public void fail(Object msgId) {
        System.out.println("spout fail:" + msgId.toString());
    }
}
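  • Note: this spout emits without a message id, so the ack()/fail() callbacks above never fire even though MyBolt acks every tuple. To make tuples trackable, the emit in nextTuple() would need an anchor; a hedged sketch (using the line itself as the message id is an illustrative choice, not from the original):
                //the second argument is the message id; with it Storm calls ack()/fail() for this tuple
                collector.emit(new Values(str, str.split("\t")[1]), str);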
  • MyBolt.java
import java.util.Map;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.IRichBolt;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;

public class MyBolt implements IRichBolt {

    private static final long serialVersionUID = 1L;
    OutputCollector collector = null;
    int num = 0;
    String valueString = null;

    @Override
    public void cleanup() { }

    @Override
    public void execute(Tuple input) {
        try {
            valueString = input.getStringByField("log");

            if (valueString != null) {
                num++;
                System.err.println(input.getSourceStreamId() + " " + Thread.currentThread().getName() + "--id="
                        + Thread.currentThread().getId() + "   lines  :" + num + "   session_id:"
                        + valueString.split("\t")[1]);
            }
            collector.ack(input);
        } catch (Exception e) {
            collector.fail(input);
            e.printStackTrace();
        }

    }

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields(""));
    }
    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }
}
  • Test class Main.java (exercises each stream grouping)
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

public class Main {

    /**
     * @param args
     */
    public static void main(String[] args) {

        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout("spout", new MySpout(), 1);

        // shuffleGrouping sends tuples downstream at random, which naturally balances the load
//      builder.setBolt("bolt", new MyBolt(), 2).shuffleGrouping("spout");

        // fieldsGrouping is like the shuffle in MapReduce: hash the grouping fields and take the modulo to pick a task
//      builder.setBolt("bolt", new MyBolt(), 2).fieldsGrouping("spout", new Fields("session_id"));

        // globalGrouping sends everything to a single task: the one with the lowest task id
//      builder.setBolt("bolt", new MyBolt(), 2).globalGrouping("spout");

        // noneGrouping: currently equivalent to shuffleGrouping
//      builder.setBolt("bolt", new MyBolt(), 2).noneGrouping("spout");

        // allGrouping: broadcast every tuple to all tasks of the bolt
        builder.setBolt("bolt", new MyBolt(), 2).allGrouping("spout");

        // Map conf = new HashMap();
        // conf.put(Config.TOPOLOGY_WORKERS, 4);
        Config conf = new Config();
        conf.setDebug(false);
        conf.setMessageTimeoutSecs(30);

        if (args.length > 0) {
            try {
                StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            }
        } else {
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("mytopology", conf, builder.createTopology());
        }
    }
}

demo4 cluster test

  • 1. Reuse the demo2 code; passing a command-line argument to the test class switches it to cluster submission.
  • 2. Package the project as a jar and upload it to the server.
  • 3. Submit it with: storm jar test-1.0-SNAPSHOT.jar com.hadoop.Test wc

  • 4. Check the directory on the server.
  • 5. Check the Storm UI page.

  • 6. Kill the Storm topology:
    storm kill wc -w 2   (wait 2 seconds before the kill takes effect)

demo5 Storm parallelism (concurrency) mechanism
