001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.camel.processor;
018
019import java.io.Closeable;
020import java.util.ArrayList;
021import java.util.Collection;
022import java.util.Iterator;
023import java.util.List;
024import java.util.Map;
025import java.util.concurrent.Callable;
026import java.util.concurrent.CompletionService;
027import java.util.concurrent.ConcurrentHashMap;
028import java.util.concurrent.ConcurrentMap;
029import java.util.concurrent.CountDownLatch;
030import java.util.concurrent.ExecutionException;
031import java.util.concurrent.ExecutorCompletionService;
032import java.util.concurrent.ExecutorService;
033import java.util.concurrent.Future;
034import java.util.concurrent.TimeUnit;
035import java.util.concurrent.atomic.AtomicBoolean;
036import java.util.concurrent.atomic.AtomicInteger;
037
038import org.apache.camel.AsyncCallback;
039import org.apache.camel.AsyncProcessor;
040import org.apache.camel.CamelContext;
041import org.apache.camel.CamelExchangeException;
042import org.apache.camel.Endpoint;
043import org.apache.camel.ErrorHandlerFactory;
044import org.apache.camel.Exchange;
045import org.apache.camel.Navigate;
046import org.apache.camel.Processor;
047import org.apache.camel.Producer;
048import org.apache.camel.StreamCache;
049import org.apache.camel.Traceable;
050import org.apache.camel.processor.aggregate.AggregationStrategy;
051import org.apache.camel.processor.aggregate.CompletionAwareAggregationStrategy;
052import org.apache.camel.processor.aggregate.TimeoutAwareAggregationStrategy;
053import org.apache.camel.spi.RouteContext;
054import org.apache.camel.spi.TracedRouteNodes;
055import org.apache.camel.spi.UnitOfWork;
056import org.apache.camel.support.ServiceSupport;
057import org.apache.camel.util.AsyncProcessorConverterHelper;
058import org.apache.camel.util.AsyncProcessorHelper;
059import org.apache.camel.util.CastUtils;
060import org.apache.camel.util.EventHelper;
061import org.apache.camel.util.ExchangeHelper;
062import org.apache.camel.util.IOHelper;
063import org.apache.camel.util.KeyValueHolder;
064import org.apache.camel.util.ObjectHelper;
065import org.apache.camel.util.ServiceHelper;
066import org.apache.camel.util.StopWatch;
067import org.apache.camel.util.concurrent.AtomicException;
068import org.apache.camel.util.concurrent.AtomicExchange;
069import org.apache.camel.util.concurrent.SubmitOrderedCompletionService;
070import org.slf4j.Logger;
071import org.slf4j.LoggerFactory;
072
073import static org.apache.camel.util.ObjectHelper.notNull;
074
076/**
077 * Implements the Multicast pattern to send a message exchange to a number of
078 * endpoints, each endpoint receiving a copy of the message exchange.
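 * <p/>
 * A minimal usage sketch; the <tt>processorA</tt>, <tt>processorB</tt>, <tt>camelContext</tt> and
 * <tt>exchange</tt> variables are assumed to exist already:
 * <pre>
 * Collection&lt;Processor&gt; outputs = Arrays.asList(processorA, processorB);
 * MulticastProcessor multicast = new MulticastProcessor(camelContext, outputs);
 * ServiceHelper.startService(multicast);   // must be started before use
 * multicast.process(exchange);             // each output receives its own copy of the exchange
 * ServiceHelper.stopService(multicast);    // stop it again when no longer needed
 * </pre>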
079 *
080 * @version 
081 * @see Pipeline
082 */
083public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable {
084
085    private static final Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);
086
087    /**
     * Class that represents each step in the multicast routing
089     */
090    static final class DefaultProcessorExchangePair implements ProcessorExchangePair {
091        private final int index;
092        private final Processor processor;
093        private final Processor prepared;
094        private final Exchange exchange;
095
096        private DefaultProcessorExchangePair(int index, Processor processor, Processor prepared, Exchange exchange) {
097            this.index = index;
098            this.processor = processor;
099            this.prepared = prepared;
100            this.exchange = exchange;
101        }
102
103        public int getIndex() {
104            return index;
105        }
106
107        public Exchange getExchange() {
108            return exchange;
109        }
110
111        public Producer getProducer() {
112            if (processor instanceof Producer) {
113                return (Producer) processor;
114            }
115            return null;
116        }
117
118        public Processor getProcessor() {
119            return prepared;
120        }
121
122        public void begin() {
123            // noop
124        }
125
126        public void done() {
127            // noop
128        }
129
130    }
131
132    /**
     * Class that represents the prepared fine grained error handlers when processing multicast/split exchanges
134     * <p/>
135     * See the <tt>createProcessorExchangePair</tt> and <tt>createErrorHandler</tt> methods.
136     */
137    static final class PreparedErrorHandler extends KeyValueHolder<RouteContext, Processor> {
138
139        public PreparedErrorHandler(RouteContext key, Processor value) {
140            super(key, value);
141        }
142
143    }
144
145    protected final Processor onPrepare;
146    private final CamelContext camelContext;
147    private Collection<Processor> processors;
148    private final AggregationStrategy aggregationStrategy;
149    private final boolean parallelProcessing;
150    private final boolean streaming;
151    private final boolean parallelAggregate;
152    private final boolean stopOnException;
153    private final ExecutorService executorService;
154    private final boolean shutdownExecutorService;
155    private ExecutorService aggregateExecutorService;
156    private final long timeout;
157    private final ConcurrentMap<PreparedErrorHandler, Processor> errorHandlers = new ConcurrentHashMap<PreparedErrorHandler, Processor>();
158    private final boolean shareUnitOfWork;
159
160    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors) {
161        this(camelContext, processors, null);
162    }
163
164    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy) {
165        this(camelContext, processors, aggregationStrategy, false, null, false, false, false, 0, null, false, false);
166    }
167
168    @Deprecated
169    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
170                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService,
171                              boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork) {
172        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService,
173                streaming, stopOnException, timeout, onPrepare, shareUnitOfWork, false);
174    }
175
176    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
177                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService, boolean streaming,
178                              boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork,
179                              boolean parallelAggregate) {
180        notNull(camelContext, "camelContext");
181        this.camelContext = camelContext;
182        this.processors = processors;
183        this.aggregationStrategy = aggregationStrategy;
184        this.executorService = executorService;
185        this.shutdownExecutorService = shutdownExecutorService;
186        this.streaming = streaming;
187        this.stopOnException = stopOnException;
188        // must enable parallel if executor service is provided
189        this.parallelProcessing = parallelProcessing || executorService != null;
190        this.timeout = timeout;
191        this.onPrepare = onPrepare;
192        this.shareUnitOfWork = shareUnitOfWork;
193        this.parallelAggregate = parallelAggregate;
194    }
195
196    @Override
197    public String toString() {
198        return "Multicast[" + getProcessors() + "]";
199    }
200
201    public String getTraceLabel() {
202        return "multicast";
203    }
204
205    public CamelContext getCamelContext() {
206        return camelContext;
207    }
208
209    public void process(Exchange exchange) throws Exception {
210        AsyncProcessorHelper.process(this, exchange);
211    }
212
213    public boolean process(Exchange exchange, AsyncCallback callback) {
214        final AtomicExchange result = new AtomicExchange();
215        Iterable<ProcessorExchangePair> pairs = null;
216
217        try {
218            boolean sync = true;
219
220            pairs = createProcessorExchangePairs(exchange);
221
222            if (isParallelProcessing()) {
223                // ensure an executor is set when running in parallel
224                ObjectHelper.notNull(executorService, "executorService", this);
225                doProcessParallel(exchange, result, pairs, isStreaming(), callback);
226            } else {
227                sync = doProcessSequential(exchange, result, pairs, callback);
228            }
229
230            if (!sync) {
231                // the remainder of the multicast will be completed async
                // so we break out now, and then the callback will be invoked which will continue routing from where we left off
233                return false;
234            }
235        } catch (Throwable e) {
236            exchange.setException(e);
237            // unexpected exception was thrown, maybe from iterator etc. so do not regard as exhausted
238            // and do the done work
239            doDone(exchange, null, pairs, callback, true, false);
240            return true;
241        }
242
243        // multicasting was processed successfully
244        // and do the done work
        Exchange subExchange = result.get();
246        doDone(exchange, subExchange, pairs, callback, true, true);
247        return true;
248    }
249
250    protected void doProcessParallel(final Exchange original, final AtomicExchange result, final Iterable<ProcessorExchangePair> pairs,
251                                     final boolean streaming, final AsyncCallback callback) throws Exception {
252
253        ObjectHelper.notNull(executorService, "ExecutorService", this);
254        ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);
255
256        final CompletionService<Exchange> completion;
257        if (streaming) {
258            // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
259            completion = new ExecutorCompletionService<Exchange>(executorService);
260        } else {
261            // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
262            completion = new SubmitOrderedCompletionService<Exchange>(executorService);
263        }
264
265        final AtomicInteger total = new AtomicInteger(0);
266        final Iterator<ProcessorExchangePair> it = pairs.iterator();
267
268        if (it.hasNext()) {
269            // when parallel then aggregate on the fly
270            final AtomicBoolean running = new AtomicBoolean(true);
271            final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
272            final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
273            final AtomicException executionException = new AtomicException();
274
275            // issue task to execute in separate thread so it can aggregate on-the-fly
276            // while we submit new tasks, and those tasks complete concurrently
277            // this allows us to optimize work and reduce memory consumption
278            final AggregateOnTheFlyTask aggregateOnTheFlyTask = new AggregateOnTheFlyTask(result, original, total, completion, running,
279                    aggregationOnTheFlyDone, allTasksSubmitted, executionException);
280            final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();
281
282            LOG.trace("Starting to submit parallel tasks");
283
284            while (it.hasNext()) {
285                final ProcessorExchangePair pair = it.next();
286                final Exchange subExchange = pair.getExchange();
287                updateNewExchange(subExchange, total.intValue(), pairs, it);
288
289                completion.submit(new Callable<Exchange>() {
290                    public Exchange call() throws Exception {
                        // only start the aggregation task when a task is actually being executed, to avoid starting
                        // the aggregation task too early and piling up too many threads
293                        if (aggregationTaskSubmitted.compareAndSet(false, true)) {
294                            // but only submit the task once
295                            aggregateExecutorService.submit(aggregateOnTheFlyTask);
296                        }
297
298                        if (!running.get()) {
299                            // do not start processing the task if we are not running
300                            return subExchange;
301                        }
302
303                        try {
304                            doProcessParallel(pair);
305                        } catch (Throwable e) {
306                            subExchange.setException(e);
307                        }
308
309                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
310                        Integer number = getExchangeIndex(subExchange);
311                        boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
312                        if (stopOnException && !continueProcessing) {
313                            // signal to stop running
314                            running.set(false);
315                            // throw caused exception
316                            if (subExchange.getException() != null) {
317                                // wrap in exception to explain where it failed
318                                CamelExchangeException cause = new CamelExchangeException("Parallel processing failed for number " + number, subExchange, subExchange.getException());
319                                subExchange.setException(cause);
320                            }
321                        }
322
323                        LOG.trace("Parallel processing complete for exchange: {}", subExchange);
324                        return subExchange;
325                    }
326                });
327
328                total.incrementAndGet();
329            }
330
            // signal that all tasks have been submitted
            LOG.trace("Signaling that all {} tasks have been submitted.", total.get());
333            allTasksSubmitted.set(true);
334
            // it is too hard to do parallel async routing so we let the caller thread run synchronously
            // and have it pick up the replies and do the aggregation (eg we use a latch to wait)
337            // wait for aggregation to be done
338            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
339            aggregationOnTheFlyDone.await();
340
341            // did we fail for whatever reason, if so throw that caused exception
342            if (executionException.get() != null) {
343                if (LOG.isDebugEnabled()) {
                    LOG.debug("Parallel processing failed due to {}", executionException.get().getMessage());
345                }
346                throw executionException.get();
347            }
348        }
349
        // now everything is okay so we are done
351        LOG.debug("Done parallel processing {} exchanges", total);
352    }
353
354    /**
355     * Task to aggregate on-the-fly for completed tasks when using parallel processing.
356     * <p/>
     * This ensures lower memory consumption as we do not need to keep all completed tasks in memory
     * before we perform aggregation. Instead this separate thread will run and aggregate as new
     * tasks complete.
     * <p/>
     * The logic is fairly complex as this implementation has to keep track of how far it got, and also
     * signal back to the <i>main</i> thread when it is done, so the <i>main</i> thread can continue
     * processing when the entire splitting is done.
364     */
365    private final class AggregateOnTheFlyTask implements Runnable {
366
367        private final AtomicExchange result;
368        private final Exchange original;
369        private final AtomicInteger total;
370        private final CompletionService<Exchange> completion;
371        private final AtomicBoolean running;
372        private final CountDownLatch aggregationOnTheFlyDone;
373        private final AtomicBoolean allTasksSubmitted;
374        private final AtomicException executionException;
375
376        private AggregateOnTheFlyTask(AtomicExchange result, Exchange original, AtomicInteger total,
377                                      CompletionService<Exchange> completion, AtomicBoolean running,
378                                      CountDownLatch aggregationOnTheFlyDone, AtomicBoolean allTasksSubmitted,
379                                      AtomicException executionException) {
380            this.result = result;
381            this.original = original;
382            this.total = total;
383            this.completion = completion;
384            this.running = running;
385            this.aggregationOnTheFlyDone = aggregationOnTheFlyDone;
386            this.allTasksSubmitted = allTasksSubmitted;
387            this.executionException = executionException;
388        }
389
390        public void run() {
391            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());
392
393            try {
394                aggregateOnTheFly();
395            } catch (Throwable e) {
396                if (e instanceof Exception) {
397                    executionException.set((Exception) e);
398                } else {
399                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
400                }
401            } finally {
402                // must signal we are done so the latch can open and let the other thread continue processing
403                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
404                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
405                aggregationOnTheFlyDone.countDown();
406            }
407        }
408
409        private void aggregateOnTheFly() throws InterruptedException, ExecutionException {
410            boolean timedOut = false;
411            boolean stoppedOnException = false;
412            final StopWatch watch = new StopWatch();
413            int aggregated = 0;
414            boolean done = false;
            // not a for loop as tasks may still be running or submitted while we aggregate on the fly
416            while (!done) {
                // check if we have already aggregated everything
418                if (allTasksSubmitted.get() && aggregated >= total.get()) {
419                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
420                    break;
421                }
422
423                Future<Exchange> future;
424                if (timedOut) {
                    // we timed out but try to grab any tasks that have already completed
                    // poll will return null if no task is present
427                    future = completion.poll();
428                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
429                } else if (timeout > 0) {
430                    long left = timeout - watch.taken();
431                    if (left < 0) {
432                        left = 0;
433                    }
434                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
435                    future = completion.poll(left, TimeUnit.MILLISECONDS);
436                } else {
437                    LOG.trace("Polling completion task #{}", aggregated);
                    // we must not block forever, so poll every second and recheck whether we are done
439                    future = completion.poll(1, TimeUnit.SECONDS);
440                    if (future == null) {
441                        // and continue loop which will recheck if we are done
442                        continue;
443                    }
444                }
445
446                if (future == null) {
447                    // timeout occurred
448                    AggregationStrategy strategy = getAggregationStrategy(null);
449                    if (strategy instanceof TimeoutAwareAggregationStrategy) {
450                        // notify the strategy we timed out
451                        Exchange oldExchange = result.get();
452                        if (oldExchange == null) {
453                            // if they all timed out the result may not have been set yet, so use the original exchange
454                            oldExchange = original;
455                        }
456                        ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated, total.intValue(), timeout);
457                    } else {
                        // log a WARN that we timed out since the result will not be aggregated and the Exchange will be lost
459                        LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated);
460                    }
461                    LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated);
462                    timedOut = true;
463
464                    // mark that index as timed out, which allows us to try to retrieve
465                    // any already completed tasks in the next loop
466                    if (completion instanceof SubmitOrderedCompletionService) {
467                        ((SubmitOrderedCompletionService<?>) completion).timeoutTask();
468                    }
469                } else {
470                    // there is a result to aggregate
471                    Exchange subExchange = future.get();
472
473                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
474                    Integer number = getExchangeIndex(subExchange);
475                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
476                    if (stopOnException && !continueProcessing) {
477                        // we want to stop on exception and an exception or failure occurred
478                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
479                        // so we should set the failed exchange as the result and break out
480                        result.set(subExchange);
481                        stoppedOnException = true;
482                        break;
483                    }
484
485                    // we got a result so aggregate it
486                    if (parallelAggregate) {
487                        doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
488                    } else {
489                        doAggregate(getAggregationStrategy(subExchange), result, subExchange);
490                    }
491                }
492
493                aggregated++;
494            }
495
496            if (timedOut || stoppedOnException) {
                if (timedOut) {
                    LOG.debug("Cancelling tasks due to timeout after {} millis.", timeout);
                }
                if (stoppedOnException) {
                    LOG.debug("Cancelling tasks due to stopOnException.");
                }
                // cancel tasks as we timed out or stopped on exception (it is safe to cancel already completed tasks)
504                running.set(false);
505            }
506        }
507    }
508
509    protected boolean doProcessSequential(Exchange original, AtomicExchange result, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
510        AtomicInteger total = new AtomicInteger();
511        Iterator<ProcessorExchangePair> it = pairs.iterator();
512
513        while (it.hasNext()) {
514            ProcessorExchangePair pair = it.next();
515            Exchange subExchange = pair.getExchange();
516            updateNewExchange(subExchange, total.get(), pairs, it);
517
518            boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
519            if (!sync) {
520                if (LOG.isTraceEnabled()) {
521                    LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", pair.getExchange().getExchangeId());
522                }
523                // the remainder of the multicast will be completed async
                // so we break out now, and then the callback will be invoked which will continue routing from where we left off
525                return false;
526            }
527
528            if (LOG.isTraceEnabled()) {
529                LOG.trace("Processing exchangeId: {} is continued being processed synchronously", pair.getExchange().getExchangeId());
530            }
531
532            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
533            // remember to test for stop on exception and aggregate before copying back results
534            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
535            if (stopOnException && !continueProcessing) {
536                if (subExchange.getException() != null) {
537                    // wrap in exception to explain where it failed
538                    CamelExchangeException cause = new CamelExchangeException("Sequential processing failed for number " + total.get(), subExchange, subExchange.getException());
539                    subExchange.setException(cause);
540                }
541                // we want to stop on exception, and the exception was handled by the error handler
542                // this is similar to what the pipeline does, so we should do the same to not surprise end users
543                // so we should set the failed exchange as the result and be done
544                result.set(subExchange);
545                return true;
546            }
547
548            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);
549
550            if (parallelAggregate) {
551                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
552            } else {
553                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
554            }
555            
556            total.incrementAndGet();
557        }
558
559        LOG.debug("Done sequential processing {} exchanges", total);
560
561        return true;
562    }
563
564    private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
565                                        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
566                                        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
567        boolean sync = true;
568
569        final Exchange exchange = pair.getExchange();
570        Processor processor = pair.getProcessor();
571        final Producer producer = pair.getProducer();
572
573        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;
574
575        // compute time taken if sending to another endpoint
576        final StopWatch watch = producer != null ? new StopWatch() : null;
577
578        try {
579            // prepare tracing starting from a new block
580            if (traced != null) {
581                traced.pushBlock();
582            }
583
584            if (producer != null) {
585                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
586            }
            // let the prepared processor process it, remember to begin the exchange pair
588            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
589            pair.begin();
590            sync = async.process(exchange, new AsyncCallback() {
591                public void done(boolean doneSync) {
592                    // we are done with the exchange pair
593                    pair.done();
594
595                    // okay we are done, so notify the exchange was sent
596                    if (producer != null) {
597                        long timeTaken = watch.stop();
598                        Endpoint endpoint = producer.getEndpoint();
599                        // emit event that the exchange was sent to the endpoint
600                        EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
601                    }
602
                    // we only have to handle async completion of the multicast
604                    if (doneSync) {
605                        return;
606                    }
607
608                    // continue processing the multicast asynchronously
609                    Exchange subExchange = exchange;
610
611                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
612                    // remember to test for stop on exception and aggregate before copying back results
613                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
614                    if (stopOnException && !continueProcessing) {
615                        if (subExchange.getException() != null) {
616                            // wrap in exception to explain where it failed
617                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
618                        } else {
619                            // we want to stop on exception, and the exception was handled by the error handler
620                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
621                            // so we should set the failed exchange as the result and be done
622                            result.set(subExchange);
623                        }
624                        // and do the done work
625                        doDone(original, subExchange, pairs, callback, false, true);
626                        return;
627                    }
628
629                    try {
630                        if (parallelAggregate) {
631                            doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
632                        } else {
633                            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
634                        }
635                    } catch (Throwable e) {
636                        // wrap in exception to explain where it failed
637                        subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
638                        // and do the done work
639                        doDone(original, subExchange, pairs, callback, false, true);
640                        return;
641                    }
642
643                    total.incrementAndGet();
644
645                    // maybe there are more processors to multicast
646                    while (it.hasNext()) {
647
648                        // prepare and run the next
649                        ProcessorExchangePair pair = it.next();
650                        subExchange = pair.getExchange();
651                        updateNewExchange(subExchange, total.get(), pairs, it);
652                        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
653
654                        if (!sync) {
655                            LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", original.getExchangeId());
656                            return;
657                        }
658
659                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
660                        // remember to test for stop on exception and aggregate before copying back results
661                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
662                        if (stopOnException && !continueProcessing) {
663                            if (subExchange.getException() != null) {
664                                // wrap in exception to explain where it failed
665                                subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
666                            } else {
667                                // we want to stop on exception, and the exception was handled by the error handler
668                                // this is similar to what the pipeline does, so we should do the same to not surprise end users
669                                // so we should set the failed exchange as the result and be done
670                                result.set(subExchange);
671                            }
672                            // and do the done work
673                            doDone(original, subExchange, pairs, callback, false, true);
674                            return;
675                        }
676
677                        // must catch any exceptions from aggregation
678                        try {
679                            if (parallelAggregate) {
680                                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
681                            } else {
682                                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
683                            }
684                        } catch (Throwable e) {
685                            // wrap in exception to explain where it failed
686                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
687                            // and do the done work
688                            doDone(original, subExchange, pairs, callback, false, true);
689                            return;
690                        }
691
692                        total.incrementAndGet();
693                    }
694
695                    // do the done work
                    subExchange = result.get();
697                    doDone(original, subExchange, pairs, callback, false, true);
698                }
699            });
700        } finally {
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
702            if (traced != null) {
703                traced.popBlock();
704            }
705        }
706
707        return sync;
708    }
709
710    private void doProcessParallel(final ProcessorExchangePair pair) throws Exception {
711        final Exchange exchange = pair.getExchange();
712        Processor processor = pair.getProcessor();
713        Producer producer = pair.getProducer();
714
715        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;
716
717        // compute time taken if sending to another endpoint
718        StopWatch watch = null;
719        if (producer != null) {
720            watch = new StopWatch();
721        }
722
723        try {
724            // prepare tracing starting from a new block
725            if (traced != null) {
726                traced.pushBlock();
727            }
728
729            if (producer != null) {
730                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
731            }
            // let the prepared processor process it, remember to begin the exchange pair
733            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
734            pair.begin();
735            // we invoke it synchronously as parallel async routing is too hard
736            AsyncProcessorHelper.process(async, exchange);
737        } finally {
738            pair.done();
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
740            if (traced != null) {
741                traced.popBlock();
742            }
743            if (producer != null) {
744                long timeTaken = watch.stop();
745                Endpoint endpoint = producer.getEndpoint();
746                // emit event that the exchange was sent to the endpoint
747                // this is okay to do here in the finally block, as the processing is not using the async routing engine
                // (we invoke it synchronously as parallel async routing is too hard)
749                EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
750            }
751        }
752    }
753
754    /**
755     * Common work which must be done when we are done multicasting.
756     * <p/>
     * This logic applies both when running synchronously and asynchronously, as there are multiple exit points
     * when using the asynchronous routing engine, and therefore we want the logic in one method instead
759     * of being scattered.
760     *
761     * @param original     the original exchange
762     * @param subExchange  the current sub exchange, can be <tt>null</tt> for the synchronous part
763     * @param pairs        the pairs with the exchanges to process
764     * @param callback     the callback
765     * @param doneSync     the <tt>doneSync</tt> parameter to call on callback
766     * @param forceExhaust whether or not error handling is exhausted
767     */
768    protected void doDone(Exchange original, Exchange subExchange, final Iterable<ProcessorExchangePair> pairs,
769                          AsyncCallback callback, boolean doneSync, boolean forceExhaust) {
770
771        // we are done so close the pairs iterator
        if (pairs instanceof Closeable) {
773            IOHelper.close((Closeable) pairs, "pairs", LOG);
774        }
775
776        AggregationStrategy strategy = getAggregationStrategy(subExchange);
777        // invoke the on completion callback
778        if (strategy instanceof CompletionAwareAggregationStrategy) {
779            ((CompletionAwareAggregationStrategy) strategy).onCompletion(subExchange);
780        }
781
782        // cleanup any per exchange aggregation strategy
783        removeAggregationStrategyFromExchange(original);
784
785        // we need to know if there was an exception, and if the stopOnException option was enabled
786        // also we would need to know if any error handler has attempted redelivery and exhausted
787        boolean stoppedOnException = false;
788        boolean exception = false;
789        boolean exhaust = forceExhaust || subExchange != null && (subExchange.getException() != null || ExchangeHelper.isRedeliveryExhausted(subExchange));
790        if (original.getException() != null || subExchange != null && subExchange.getException() != null) {
791            // there was an exception and we stopped
792            stoppedOnException = isStopOnException();
793            exception = true;
794        }
795
796        // must copy results at this point
797        if (subExchange != null) {
798            if (stoppedOnException) {
                // if we stopped due to an exception then only propagate the exception
800                original.setException(subExchange.getException());
801            } else {
                // copy the current result to the original so it will contain the result of this EIP
803                ExchangeHelper.copyResults(original, subExchange);
804            }
805        }
806
807        // .. and then if there was an exception we need to configure the redelivery exhaust
        // for example the noErrorHandler will not cause redelivery exhaust, so if this error
        // handler has been in use, then the exhaust would be false (if not forced)
810        if (exception) {
811            // multicast uses error handling on its output processors and they have tried to redeliver
812            // so we shall signal back to the other error handlers that we are exhausted and they should not
813            // also try to redeliver as we will then do that twice
814            original.setProperty(Exchange.REDELIVERY_EXHAUSTED, exhaust);
815        }
816
817        callback.done(doneSync);
818    }
819
820    /**
821     * Aggregate the {@link Exchange} with the current result.
822     * This method is synchronized and is called directly when parallelAggregate is disabled (by default).
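     * <p/>
     * A rough sketch of what the strategy sees on each call (this follows the general Camel
     * {@link AggregationStrategy} contract; the <tt>oldExchange</tt>/<tt>newExchange</tt> names are illustrative):
     * <pre>
     * // first call: oldExchange is typically null, so most strategies return newExchange as-is
     * // later calls: oldExchange holds the result aggregated so far
     * Exchange aggregated = strategy.aggregate(oldExchange, newExchange);
     * </pre>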
823     *
824     * @param strategy the aggregation strategy to use
825     * @param result   the current result
826     * @param exchange the exchange to be added to the result
827     * @see #doAggregateInternal(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
828     */
829    protected synchronized void doAggregate(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
830        doAggregateInternal(strategy, result, exchange);
831    }
832
833    /**
834     * Aggregate the {@link Exchange} with the current result.
835     * This method is unsynchronized and is called directly when parallelAggregate is enabled.
     * In all other cases, this method is called from {@link #doAggregate}, which is synchronized.
837     *
838     * @param strategy the aggregation strategy to use
839     * @param result   the current result
840     * @param exchange the exchange to be added to the result
841     * @see #doAggregate(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
842     */
843    protected void doAggregateInternal(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
844        if (strategy != null) {
845            // prepare the exchanges for aggregation
846            Exchange oldExchange = result.get();
847            ExchangeHelper.prepareAggregation(oldExchange, exchange);
848            result.set(strategy.aggregate(oldExchange, exchange));
849        }
850    }
851
852    protected void updateNewExchange(Exchange exchange, int index, Iterable<ProcessorExchangePair> allPairs,
853                                     Iterator<ProcessorExchangePair> it) {
854        exchange.setProperty(Exchange.MULTICAST_INDEX, index);
855        if (it.hasNext()) {
856            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.FALSE);
857        } else {
858            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.TRUE);
859        }
860    }
861
862    protected Integer getExchangeIndex(Exchange exchange) {
863        return exchange.getProperty(Exchange.MULTICAST_INDEX, Integer.class);
864    }
865
866    protected Iterable<ProcessorExchangePair> createProcessorExchangePairs(Exchange exchange) throws Exception {
867        List<ProcessorExchangePair> result = new ArrayList<ProcessorExchangePair>(processors.size());
868
869        StreamCache streamCache = null;
870        if (isParallelProcessing() && exchange.getIn().getBody() instanceof StreamCache) {
            // in the parallel processing case the stream must be copied, therefore get hold of the stream cache
872            streamCache = (StreamCache) exchange.getIn().getBody();
873        }
874
875        int index = 0;
876        for (Processor processor : processors) {
877            // copy exchange, and do not share the unit of work
878            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
879
880            if (streamCache != null) {
881                if (index > 0) {
882                    // copy it otherwise parallel processing is not possible,
883                    // because streams can only be read once
884                    StreamCache copiedStreamCache = streamCache.copy();
885                    if (copiedStreamCache != null) {
886                        copy.getIn().setBody(copiedStreamCache);  
887                    }
888                }
889            }
890
            // If the multicast processor has an aggregation strategy
            // then the StreamCache created by the child routes must not be
            // closed by the unit of work of the child route, but by the unit of
            // work of the parent route (or grandparent route and so on, in case of nesting).
            // Therefore set the unit of work of the parent route as the stream cache unit of work,
            // if it is not already set.
897            if (copy.getProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK) == null) {
898                copy.setProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK, exchange.getUnitOfWork());
899            }
900            // if we share unit of work, we need to prepare the child exchange
901            if (isShareUnitOfWork()) {
902                prepareSharedUnitOfWork(copy, exchange);
903            }
904
905            // and add the pair
906            RouteContext routeContext = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getRouteContext() : null;
907            result.add(createProcessorExchangePair(index++, processor, copy, routeContext));
908        }
909
910        if (exchange.getException() != null) {
            // force any exceptions that occurred during creation of the exchange pairs to be thrown
            // before returning the answer
913            throw exchange.getException();
914        }
915
916        return result;
917    }
918
919    /**
     * Creates the {@link ProcessorExchangePair} which holds the processor and exchange to be sent out.
921     * <p/>
922     * You <b>must</b> use this method to create the instances of {@link ProcessorExchangePair} as they
923     * need to be specially prepared before use.
924     *
925     * @param index        the index
926     * @param processor    the processor
927     * @param exchange     the exchange
928     * @param routeContext the route context
929     * @return prepared for use
930     */
931    protected ProcessorExchangePair createProcessorExchangePair(int index, Processor processor, Exchange exchange,
932                                                                RouteContext routeContext) {
933        Processor prepared = processor;
934
935        // set property which endpoint we send to
936        setToEndpoint(exchange, prepared);
937
938        // rework error handling to support fine grained error handling
939        prepared = createErrorHandler(routeContext, exchange, prepared);
940
941        // invoke on prepare on the exchange if specified
942        if (onPrepare != null) {
943            try {
944                onPrepare.process(exchange);
945            } catch (Exception e) {
946                exchange.setException(e);
947            }
948        }
949        return new DefaultProcessorExchangePair(index, processor, prepared, exchange);
950    }
951
952    protected Processor createErrorHandler(RouteContext routeContext, Exchange exchange, Processor processor) {
953        Processor answer;
954
955        boolean tryBlock = exchange.getProperty(Exchange.TRY_ROUTE_BLOCK, false, boolean.class);
956
957        // do not wrap in error handler if we are inside a try block
958        if (!tryBlock && routeContext != null) {
959            // wrap the producer in error handler so we have fine grained error handling on
960            // the output side instead of the input side
            // this is needed to support redelivery on that output alone and not doing redelivery
            // for the entire multicast block again, which would start from scratch again
963
964            // create key for cache
965            final PreparedErrorHandler key = new PreparedErrorHandler(routeContext, processor);
966
967            // lookup cached first to reuse and preserve memory
968            answer = errorHandlers.get(key);
969            if (answer != null) {
970                LOG.trace("Using existing error handler for: {}", processor);
971                return answer;
972            }
973
974            LOG.trace("Creating error handler for: {}", processor);
975            ErrorHandlerFactory builder = routeContext.getRoute().getErrorHandlerBuilder();
            // create error handler (create the error handler directly to keep it lightweight,
            // instead of using ProcessorDefinition.wrapInErrorHandler)
978            try {
979                processor = builder.createErrorHandler(routeContext, processor);
980
                // and wrap in unit of work processor so the copied exchange can also run under UoW
982                answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
983
984                boolean child = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class) != null;
985
986                // must start the error handler
987                ServiceHelper.startServices(answer);
988
989                // here we don't cache the child unit of work
990                if (!child) {
991                    // add to cache
992                    errorHandlers.putIfAbsent(key, answer);
993                }
994
995            } catch (Exception e) {
996                throw ObjectHelper.wrapRuntimeCamelException(e);
997            }
998        } else {
            // and wrap in unit of work processor so the copied exchange can also run under UoW
1000            answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
1001        }
1002
1003        return answer;
1004    }
1005
1006    /**
1007     * Strategy to create the unit of work to be used for the sub route
1008     *
1009     * @param routeContext the route context
1010     * @param processor    the processor
1011     * @param exchange     the exchange
1012     * @return the unit of work processor
1013     */
1014    protected Processor createUnitOfWorkProcessor(RouteContext routeContext, Processor processor, Exchange exchange) {
1015        String routeId = routeContext != null ? routeContext.getRoute().idOrCreate(routeContext.getCamelContext().getNodeIdFactory()) : null;
1016        CamelInternalProcessor internal = new CamelInternalProcessor(processor);
1017
1018        // and wrap it in a unit of work so the UoW is on the top, so the entire route will be in the same UoW
1019        UnitOfWork parent = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class);
1020        if (parent != null) {
1021            internal.addAdvice(new CamelInternalProcessor.ChildUnitOfWorkProcessorAdvice(routeId, parent));
1022        } else {
1023            internal.addAdvice(new CamelInternalProcessor.UnitOfWorkProcessorAdvice(routeId));
1024        }
1025
        // and then in route context so we can keep track of which route this is at runtime
1027        if (routeContext != null) {
1028            internal.addAdvice(new CamelInternalProcessor.RouteContextAdvice(routeContext));
1029        }
1030        return internal;
1031    }
1032
1033    /**
1034     * Prepares the exchange for participating in a shared unit of work
1035     * <p/>
     * This ensures a child exchange can access its parent {@link UnitOfWork} when it participates
1037     * in a shared unit of work.
1038     *
1039     * @param childExchange  the child exchange
1040     * @param parentExchange the parent exchange
1041     */
1042    protected void prepareSharedUnitOfWork(Exchange childExchange, Exchange parentExchange) {
1043        childExchange.setProperty(Exchange.PARENT_UNIT_OF_WORK, parentExchange.getUnitOfWork());
1044    }
1045
1046    protected void doStart() throws Exception {
1047        if (isParallelProcessing() && executorService == null) {
1048            throw new IllegalArgumentException("ParallelProcessing is enabled but ExecutorService has not been set");
1049        }
1050        if (timeout > 0 && !isParallelProcessing()) {
1051            throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled");
1052        }
1053        if (isParallelProcessing() && aggregateExecutorService == null) {
            // use an unbounded thread pool so we ensure the aggregate on-the-fly task will always have a thread
            // assigned and run as soon as the task is submitted. If not, then the aggregate task may not be able to run
            // and signal completion during processing, which would appear as a deadlock or slow processing
1057            String name = getClass().getSimpleName() + "-AggregateTask";
1058            aggregateExecutorService = createAggregateExecutorService(name);
1059        }
1060        ServiceHelper.startServices(aggregationStrategy, processors);
1061    }
1062
1063    /**
1064     * Strategy to create the thread pool for the aggregator background task which waits for and aggregates
1065     * completed tasks when running in parallel mode.
1066     *
1067     * @param name  the suggested name for the background thread
1068     * @return the thread pool
1069     */
1070    protected synchronized ExecutorService createAggregateExecutorService(String name) {
        // use a cached thread pool so each on-the-fly task has a dedicated thread to process completions as they come in
1072        return camelContext.getExecutorServiceManager().newCachedThreadPool(this, name);
1073    }
1074
1075    @Override
1076    protected void doStop() throws Exception {
1077        ServiceHelper.stopServices(processors, errorHandlers, aggregationStrategy);
1078    }
1079
1080    @Override
1081    protected void doShutdown() throws Exception {
1082        ServiceHelper.stopAndShutdownServices(processors, errorHandlers, aggregationStrategy);
1083        // only clear error handlers when shutting down
1084        errorHandlers.clear();
1085
1086        if (shutdownExecutorService && executorService != null) {
1087            getCamelContext().getExecutorServiceManager().shutdownNow(executorService);
1088        }
1089        if (aggregateExecutorService != null) {
1090            getCamelContext().getExecutorServiceManager().shutdownNow(aggregateExecutorService);
1091        }
1092    }
1093
1094    protected static void setToEndpoint(Exchange exchange, Processor processor) {
1095        if (processor instanceof Producer) {
1096            Producer producer = (Producer) processor;
1097            exchange.setProperty(Exchange.TO_ENDPOINT, producer.getEndpoint().getEndpointUri());
1098        }
1099    }
1100
1101    protected AggregationStrategy getAggregationStrategy(Exchange exchange) {
1102        AggregationStrategy answer = null;
1103
1104        // prefer to use per Exchange aggregation strategy over a global strategy
1105        if (exchange != null) {
1106            Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1107            Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1108            if (map != null) {
1109                answer = map.get(this);
1110            }
1111        }
1112        if (answer == null) {
1113            // fallback to global strategy
1114            answer = getAggregationStrategy();
1115        }
1116        return answer;
1117    }
1118
1119    /**
1120     * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}.
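     * <p/>
     * A minimal sketch of the intended usage; {@link org.apache.camel.processor.aggregate.UseLatestAggregationStrategy}
     * is only used as an example strategy here:
     * <pre>
     * // store a per-exchange strategy before processing the sub exchanges ...
     * setAggregationStrategyOnExchange(exchange, new UseLatestAggregationStrategy());
     * // ... and remove it again when this processor is done with the exchange (see doDone)
     * removeAggregationStrategyFromExchange(exchange);
     * </pre>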
1121     *
1122     * @param exchange            the exchange
1123     * @param aggregationStrategy the strategy
1124     */
1125    protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) {
1126        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1127        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1128        if (map == null) {
1129            map = new ConcurrentHashMap<Object, AggregationStrategy>();
1130        } else {
            // it is not safe to use the map directly as the exchange doesn't have a deep copy of its properties
            // so we just create a new copy if we need to change the map
1133            map = new ConcurrentHashMap<Object, AggregationStrategy>(map);
1134        }
1135        // store the strategy using this processor as the key
1136        // (so we can store multiple strategies on the same exchange)
1137        map.put(this, aggregationStrategy);
1138        exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map);
1139    }
1140
1141    /**
1142     * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange}
1143     * which must be done after use.
1144     *
1145     * @param exchange the current exchange
1146     */
1147    protected void removeAggregationStrategyFromExchange(Exchange exchange) {
1148        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1149        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1150        if (map == null) {
1151            return;
1152        }
1153        // remove the strategy using this processor as the key
1154        map.remove(this);
1155    }
1156
1157    /**
1158     * Is the multicast processor working in streaming mode?
1159     * <p/>
1160     * In streaming mode:
1161     * <ul>
1162     * <li>we use {@link Iterable} to ensure we can send messages as soon as the data becomes available</li>
     * <li>for parallel processing, we start aggregating responses as they are sent back to the processor;
1164     * this means the {@link org.apache.camel.processor.aggregate.AggregationStrategy} has to take care of handling out-of-order arrival of exchanges</li>
1165     * </ul>
1166     */
1167    public boolean isStreaming() {
1168        return streaming;
1169    }
1170
1171    /**
     * Should the multicast processor stop processing further exchanges if an exception occurred?
1173     */
1174    public boolean isStopOnException() {
1175        return stopOnException;
1176    }
1177
1178    /**
     * Returns the processors to multicast to
1180     */
1181    public Collection<Processor> getProcessors() {
1182        return processors;
1183    }
1184
1185    /**
1186     * An optional timeout in millis when using parallel processing
1187     */
1188    public long getTimeout() {
1189        return timeout;
1190    }
1191
1192    /**
1193     * Use {@link #getAggregationStrategy(org.apache.camel.Exchange)} instead.
1194     */
1195    public AggregationStrategy getAggregationStrategy() {
1196        return aggregationStrategy;
1197    }
1198
1199    public boolean isParallelProcessing() {
1200        return parallelProcessing;
1201    }
1202
1203    public boolean isShareUnitOfWork() {
1204        return shareUnitOfWork;
1205    }
1206
1207    public List<Processor> next() {
1208        if (!hasNext()) {
1209            return null;
1210        }
1211        return new ArrayList<Processor>(processors);
1212    }
1213
1214    public boolean hasNext() {
1215        return processors != null && !processors.isEmpty();
1216    }
1217}