Java class com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory usage examples
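
Every snippet on this page hands a concrete IRecordProcessorFactory implementation (RecordFactory, Factory, KinesisRecordProcessorFactory, and so on) to the KCL Worker, but none of them show what such a factory looks like. Below is a minimal sketch against the KCL 1.x v1 interfaces; the names SampleRecordProcessorFactory and SampleRecordProcessor are illustrative only and do not come from any of the projects listed here, and the import locations assume a recent KCL 1.x release.

import java.nio.charset.StandardCharsets;
import java.util.List;

import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason;
import com.amazonaws.services.kinesis.model.Record;

public class SampleRecordProcessorFactory implements IRecordProcessorFactory {

    // The Worker calls createProcessor() once per shard lease it acquires,
    // so each processor instance can hold per-shard state.
    @Override
    public IRecordProcessor createProcessor() {
        return new SampleRecordProcessor();
    }

    private static class SampleRecordProcessor implements IRecordProcessor {

        private String shardId;

        @Override
        public void initialize(String shardId) {
            this.shardId = shardId;
        }

        @Override
        public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
            for (Record record : records) {
                // Record data is a ByteBuffer; decode it as UTF-8 for this sketch.
                String data = StandardCharsets.UTF_8.decode(record.getData()).toString();
                System.out.println(shardId + ": " + data);
            }
            try {
                // Checkpoint after each batch; production code usually checkpoints on an interval.
                checkpointer.checkpoint();
            } catch (Exception e) {
                // checkpoint() throws checked KCL exceptions (InvalidStateException, ShutdownException, ...).
                e.printStackTrace();
            }
        }

        @Override
        public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
            if (reason == ShutdownReason.TERMINATE) {
                try {
                    // Checkpoint on TERMINATE so the child shards can be processed.
                    checkpointer.checkpoint();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }
}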

Project: ingestion-service    File: KafkaKinesisIntegrationTest.java
private void startKinesisConsumer() throws Exception {
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();

    String region = "eu-west-1";
    logger.info("Starting in Region " + region);

    String workerId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + UUID.randomUUID();

    KinesisClientLibConfiguration kinesisClientLibConfiguration = new KinesisClientLibConfiguration(
            this.getClass().getName(), TestConstants.stream, credentialsProvider, workerId)
            .withInitialPositionInStream(InitialPositionInStream.LATEST).withRegionName(region);

    IRecordProcessorFactory recordProcessorFactory = new RecordFactory();
    worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration);

    es = Executors.newSingleThreadExecutor();
    es.execute(worker);
}
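
The test above only shows how the consumer is started; a matching teardown is not part of the snippet. A hedged sketch, reusing the worker and es fields from the code above (the method name stopKinesisConsumer is illustrative), could look like this:

private void stopKinesisConsumer() throws Exception {
    // Ask the KCL worker to give up its shard leases and stop fetching records.
    worker.shutdown();
    // Then stop the executor thread that was driving Worker.run().
    es.shutdown();
    if (!es.awaitTermination(30, java.util.concurrent.TimeUnit.SECONDS)) {
        es.shutdownNow();
    }
}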
Project: dynamodb-streams-kafka    File: KafkaDynamoStreamAdapter.java
public KafkaDynamoStreamAdapter(String regionName, String srcTable, IRecordProcessorFactory processorFactory) {
    sourceTable = srcTable;
    credentialsProvider = new DefaultAWSCredentialsProviderChain();
    recordProcessorFactory = processorFactory;

    adapterClient = new AmazonDynamoDBStreamsAdapterClient(credentialsProvider, new ClientConfiguration());
    dynamoDBClient = new AmazonDynamoDBClient(credentialsProvider, new ClientConfiguration());
    cloudWatchClient = new AmazonCloudWatchClient(credentialsProvider, new ClientConfiguration());

    if ("local".equalsIgnoreCase(regionName)) {
        setClientEndpoints(localddbEndpoint);
    } else if (regionName != null) {
        Region region = Region.getRegion(Regions.fromName(regionName));
        adapterClient.setRegion(region);
        dynamoDBClient.setRegion(region);
        cloudWatchClient.setRegion(region);
    }
}
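
The constructor above only wires up the record processor factory and the three AWS clients; the Worker itself is presumably created elsewhere. KCL 1.x has a Worker constructor that takes the Kinesis, DynamoDB, and CloudWatch clients directly, and AmazonDynamoDBStreamsAdapterClient implements the AmazonKinesis interface, so a sketch of that step might look like the following (the start method and the workerConfig parameter are assumptions, not part of the original snippet):

// Hypothetical start method: the adapter client stands in for AmazonKinesis,
// so the regular KCL Worker machinery can consume a DynamoDB stream.
public void start(KinesisClientLibConfiguration workerConfig) {
    Worker worker = new Worker(recordProcessorFactory, workerConfig,
            adapterClient, dynamoDBClient, cloudWatchClient);
    new Thread(worker, "kcl-worker").start();
}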
Project: flume-kinesis    File: KinesisSource.java
@Override
public void start() {

    IRecordProcessorFactory recordProcessorFactory = new RecordProcessorFactory(getChannelProcessor(),
                                                                                serializer,
                                                                                backOffTimeInMillis,
                                                                                numberRetries,
                                                                                checkpointIntervalMillis);

    worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration);

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            System.out.println("Shutting down Kinesis client thread...");
            worker.shutdown();
        }
    });

    try {
        worker.run();
    } catch (AmazonClientException e) {
        logger.error("Can't connect to Amazon Kinesis", e);
        Throwables.propagate(e);
    }

}
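
A Flume source normally also overrides stop(); the snippet only registers a JVM shutdown hook. A minimal sketch that mirrors that hook, assuming the worker field shown in start() and the standard Flume source lifecycle, could be:

@Override
public void stop() {
    if (worker != null) {
        // Release shard leases and stop the record processors.
        worker.shutdown();
    }
}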
Project: aws-kinesis-beanstalk-workers    File: ManagedConsumer.java
public int run() throws Exception {
    configure();

    System.out.println(String.format("Starting %s", appName));
    LOG.info(String.format("Running %s to process stream %s", appName, streamName));

    IRecordProcessorFactory recordProcessorFactory = new ManagedClientProcessorFactory(
            this.templateProcessor);
    Worker worker = new Worker(recordProcessorFactory, this.config);

    int exitCode = 0;
    int failures = 0;

    // run the worker, tolerating as many failures as is configured
    while (failures < failuresToTolerate || failuresToTolerate == -1) {
        try {
            worker.run();
        } catch (Throwable t) {
            LOG.error("Caught throwable while processing data.", t);

            failures++;

            if (failures < failuresToTolerate) {
                LOG.error("Restarting...");
            }
            exitCode = 1;
        }
    }

    return exitCode;
}
Project: awsbigdata    File: KinesisServer.java
public static void main(String[] args) throws Exception {
    java.security.Security.setProperty("networkaddress.cache.ttl", "60");

    Server server = new Server();
    ServerConnector connector = new ServerConnector(server);
    connector.setPort(80);
    server.addConnector(connector);

    ResourceHandler resource_handler = new ResourceHandler();
    resource_handler.setResourceBase(".");
    ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
    context.setContextPath("/");
    ServletHolder holderEvents = new ServletHolder("ws-events", MessageProxyServlet.class);
    context.addServlet(holderEvents, "/kinesisapp/*");

    HandlerList handlers = new HandlerList();
    handlers.setHandlers(new Handler[] { resource_handler, context, new DefaultHandler() });
    server.setHandler(handlers);
    server.start();

    AWSCredentialsProvider credentialsProvider = new EnvironmentVariableCredentialsProvider();
    KinesisClientLibConfiguration kinesisConfig = new KinesisClientLibConfiguration(
            System.getProperty("kinesisapp.name"),
            System.getProperty("kinesisapp.stream"),
            credentialsProvider, Long.toString(System.currentTimeMillis()))
            .withKinesisEndpoint(System.getProperty("kinesisapp.endpoint"));

    IRecordProcessorFactory factory = new Factory();
    Worker worker = new Worker(factory, kinesisConfig);
    worker.run();

    /*
    Processor p = new Processor();
    p.test();
    */
}
Project: amazon-kinesis-aggregators    File: AggregatorConsumer.java
public int run() throws Exception {
    configure();

    System.out.println(String.format("Starting %s", appName));
    LOG.info(String.format("Running %s to process stream %s", appName,
            streamName));

    IRecordProcessorFactory recordProcessorFactory = new AggregatorProcessorFactory(
            aggGroup);
    worker = new Worker(recordProcessorFactory, this.config);

    int exitCode = 0;
    int failures = 0;

    // run the worker, tolerating as many failures as is configured
    while (failures < failuresToTolerate || failuresToTolerate == -1) {
        try {
            worker.run();
        } catch (Throwable t) {
            LOG.error("Caught throwable while processing data.", t);

            failures++;

            if (failures < failuresToTolerate) {
                LOG.error("Restarting...");
            } else {
                shutdown();
            }
            exitCode = 1;
        }
    }

    return exitCode;
}
Project: aws-big-data-blog    File: ManagedConsumer.java
public int run() throws Exception {
    configure();

    System.out.println(String.format("Starting %s", appName));
    LOG.info(String.format("Running %s to process stream %s", appName, streamName));

    IRecordProcessorFactory recordProcessorFactory = new ManagedClientProcessorFactory(
            this.templateProcessor);
    Worker worker = new Worker(recordProcessorFactory, this.config);

    int exitCode = 0;
    int failures = 0;

    // run the worker, tolerating as many failures as is configured
    while (failures < failuresToTolerate || failuresToTolerate == -1) {
        try {
            worker.run();
        } catch (Throwable t) {
            LOG.error("Caught throwable while processing data.", t);

            failures++;

            if (failures < failuresToTolerate) {
                LOG.error("Restarting...");
            }
            exitCode = 1;
        }
    }

    return exitCode;
}
Project: aws-big-data-blog    File: KinesisApplication.java
/**
 * @param args Property file with config overrides (e.g. application name, stream name)
 * @throws IOException Thrown if we can't read properties from the specified properties file
 */
public static void main(String[] args) throws IOException {
    String propertiesFile = null;

    if (args.length > 1) {
        System.err.println("Usage: java " + KinesisApplication.class.getName() + " <propertiesFile>");
        System.exit(1);
    } else if (args.length == 1) {
        propertiesFile = args[0];
    }

    configure(propertiesFile);

    System.out.println("Starting " + applicationName);
    LOG.info("Running " + applicationName + " to process stream " + streamName);


    IRecordProcessorFactory recordProcessorFactory = new KinesisRecordProcessorFactory(redisEndpoint, redisPort);
    Worker worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration);



    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.", t);
        exitCode = 1;
    }
    System.exit(exitCode);
}
Project: tweetamo    File: TweetamoServer.java
/**
 * @param args Property file with config overrides (e.g. application name, stream name)
 * @throws IOException Thrown if we can't read properties from the specified properties file
 */
public static void main(String[] args) throws IOException {
    String propertiesFile = null;

    if (args.length > 1) {
        System.err.println("Usage: java " + TweetamoServer.class.getName() + " <propertiesFile>");
        System.exit(1);
    } else if (args.length == 1) {
        propertiesFile = args[0];
    }

    configure(propertiesFile);

    System.out.println("Starting " + applicationName);
    LOG.info("Running " + applicationName + " to process stream " + streamName);


    IRecordProcessorFactory recordProcessorFactory = new TweetamoRecordProcessorFactory();
     Worker worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration);

    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.", t);
        exitCode = 1;
    }
    System.exit(exitCode);
}