diff --git a/.gitignore b/.gitignore
index e85f507..225f0c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,8 +36,6 @@ Thumbs.db
 test/tmp/*dat
 env/
 
-.idea
-
 # Documentation #
 #################
 docs/build
@@ -45,9 +43,11 @@ docs/tools/media/
 docs/tools/pages/
 
 
-# test and coverage
-test/tmp/.cov*
+# pycharm
+.idea/*
 
+# test and coverage
+test/.coverage
 
 # Pbr
 pbr-*.egg/
diff --git a/.idea/misc.xml b/.idea/misc.xml
deleted file mode 100644
index eb86ae6..0000000
--- a/.idea/misc.xml
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="ProjectLevelVcsManager" settingsEditedManually="false">
-    <OptionsSetting value="true" id="Add" />
-    <OptionsSetting value="true" id="Remove" />
-    <OptionsSetting value="true" id="Checkout" />
-    <OptionsSetting value="true" id="Update" />
-    <OptionsSetting value="true" id="Status" />
-    <OptionsSetting value="true" id="Edit" />
-    <ConfirmationsSetting value="0" id="Add" />
-    <ConfirmationsSetting value="0" id="Remove" />
-  </component>
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 2.7.11+ (/usr/bin/python2.7)" project-jdk-type="Python SDK" />
-</project>
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..df28c2f
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,35 @@
+language: python
+sudo: true
+python:
+  - "2.7"
+  # - "3.4"
+
+# command to install dependencies
+# some are only used for travis/coveralls, so we install them here only
+install:
+  - ./test/setup_test.sh
+
+# command to run tests
+script:
+  # to help debugging: knowing exactly which versions are in use can be rather useful.
+  - pip freeze
+  # Code static analysis
+  # Not for the example module !
+  # - pep8 --max-line-length=100 --exclude='*.pyc, *.cfg, *.log' --ignore='E402' alignak_module_ws/*
+  # - pylint --rcfile=.pylintrc alignak_module_ws/
+  # Code dynamic analysis
+  - cd test
+  # notice: nose-cov is used because it is compatible with --processes, but it produces one .coverage file per process,
+  # so we must combine them at the end
+  - coverage erase
+  - nosetests -xv --process-restartworker --processes=1 --process-timeout=300 --with-cover --cover-package=alignak_module_example test_module.py
+  - coverage combine
+  - cd ..
+# specific call to launch coverage data into coveralls.io
+after_success:
+  # to get coverage data with relative paths and not absolute ones, we have to
+  # execute coveralls from the base directory of the project,
+  # so we need to move the .coverage file here:
+  # mv test/.coverage . && coveralls --rcfile=test/.coveragerc -v
+  mv test/.coverage . && coveralls -v
+
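Note: as the comments above explain, nose-cov writes one .coverage data file per worker process, and "coverage combine" merges them before the data is sent to coveralls. The same merge can be scripted with the coverage Python API; a minimal sketch (assuming coverage 4.x and its default .coverage.* file naming):

    # Sketch: merge per-process coverage data files into a single .coverage file
    from coverage import Coverage

    cov = Coverage()
    cov.combine()   # picks up .coverage.* data files in the current directory
    cov.save()      # writes the merged .coverage file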
diff --git a/MANIFEST.in b/MANIFEST.in
index c2d5a71..769a2bc 100755
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,5 +1,6 @@
 include version.py
 include README.rst
 include LICENSE
+include AUTHORS
 include requirements.txt
-recursive-include alignak_module_log *
\ No newline at end of file
+recursive-include alignak_module_example *
\ No newline at end of file
diff --git a/README.rst b/README.rst
index 0264e3a..791a665 100755
--- a/README.rst
+++ b/README.rst
@@ -13,8 +13,8 @@ Build status (stable release)
 Build status (development release)
 ----------------------------------
 
-.. image:: https://travis-ci.org/Alignak-monitoring-contrib/alignak-module-example.svg?branch=develop
-    :target: https://travis-ci.org/Alignak-monitoring-contrib/alignak-module-example
+.. image:: https://travis-ci.org/Alignak-monitoring/alignak-module-example.svg?branch=develop
+    :target: https://travis-ci.org/Alignak-monitoring/alignak-module-example
 
 
 Short description
@@ -48,6 +48,7 @@ that it is possible to have several existing modules for the same feature. The c
     * passive, for a module that will collect passive checks results (NSCA, ...)
     * logs, for a module that will collect monitoring logs
     * action, for a module that will execute some actions (acknowledge, downtime, ...)
+    * poller, for a module that will execute checks in a poller
 
 Old Nagios parameters require that some external modules are installed for the corresponding
 features to be available. The Arbiter will alert if some features are activated and the
@@ -61,8 +62,7 @@ All Alignak modules are stored in their own repository in the `Alignak monitorin
 
 Repository example
 ~~~~~~~~~~~~~~~~~~
-Repository directories and files example:
-::
+Repository directories and files example::
 
     README.rst
     LICENCE
@@ -137,16 +137,14 @@ Installation
 
 From PyPI
 ~~~~~~~~~
-To install the module from PyPI:
-::
+To install the module from PyPI::
 
     pip install alignak-module-EXAMPLE
 
 
 From source files
 ~~~~~~~~~~~~~~~~~
-To install the module from the source files:
-::
+To install the module from the source files::
 
     git clone https://github.com/Alignak-monitoring-contrib/alignak-module-EXAMPLE
     cd alignak-module-EXAMPLE
@@ -176,7 +174,7 @@ To set up several instances of the same module:
 Bugs, issues and contributing
 -----------------------------
 
-Please report any issue using the project `GitHub repository: <https://github.com/Alignak-monitoring-contrib/alignak-module-example/issues>`_.
+Please report any issue using the project `GitHub repository: <https://github.com/Alignak-monitoring/alignak-module-example/issues>`_.
 
 License
 -------
diff --git a/alignak_module_example/ALIGNAKETC/arbiter/modules/mod-EXAMPLE.cfg b/alignak_module_example/ALIGNAKETC/arbiter/modules/mod-EXAMPLE.cfg
deleted file mode 100755
index 2d31760..0000000
--- a/alignak_module_example/ALIGNAKETC/arbiter/modules/mod-EXAMPLE.cfg
+++ /dev/null
@@ -1,14 +0,0 @@
-## Module:      EXAMPLE
-## Loaded by:   Broker
-# .....
-define module {
-    module_alias            EXAMPLE
-    module_type             example
-    python_name             alignak_module_EXAMPLE
-
-    # Module configuration parameters
-    # ---
-    option_1                foo
-    option_2                bar
-    option_3                foobar
-}
diff --git a/alignak_module_example/ALIGNAKETC/arbiter/modules/mod-example.cfg b/alignak_module_example/ALIGNAKETC/arbiter/modules/mod-example.cfg
new file mode 100755
index 0000000..8c91279
--- /dev/null
+++ b/alignak_module_example/ALIGNAKETC/arbiter/modules/mod-example.cfg
@@ -0,0 +1,17 @@
+## Module:      EXAMPLE
+## Loaded by:   Broker
+# .....
+define module {
+    # Module alias to use as a module identifier
+    module_alias            example
+    # List of module types (see readme.rst)
+    module_types            example
+    # Python module name
+    python_name             alignak_module_example
+
+    # Module configuration parameters
+    # ---
+    option_1                foo
+    option_2                bar
+    option_3                foobar
+}
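As shown further down in example.py, each "option_N value" line of this definition becomes an attribute of the module configuration object, which the module reads with getattr. A minimal sketch of that pattern, using the names defined above:

    # Sketch: how the module picks up the parameters declared in mod-example.cfg.
    # mod_conf is the Module object the Arbiter builds from this definition.
    from alignak.basemodule import BaseModule

    class Example(BaseModule):
        def __init__(self, mod_conf):
            BaseModule.__init__(self, mod_conf)
            self.option_1 = getattr(mod_conf, 'option_1', None)   # 'foo'
            self.option_2 = getattr(mod_conf, 'option_2', None)   # 'bar'
            self.option_3 = getattr(mod_conf, 'option_3', None)   # 'foobar'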
diff --git a/alignak_module_example/example.py b/alignak_module_example/example.py
index ffb551f..9904b20 100755
--- a/alignak_module_example/example.py
+++ b/alignak_module_example/example.py
@@ -23,10 +23,9 @@
 them to a Python logger configured in the module configuration file
 """
 
-import os
-import json
 import time
 import logging
+import inspect
 
 from alignak.basemodule import BaseModule
 
@@ -82,22 +81,29 @@ def __init__(self, mod_conf):
         logger.debug("received configuration: %s", mod_conf.__dict__)
 
         # Options are the variables defined in the module configuration file
-        self.option_1 = getattr(mod_conf, 'option_1', None)
-        self.option_2 = getattr(mod_conf, 'option_2', None)
-        self.option_3 = getattr(mod_conf, 'option_3', None)
+        self.option_1 = getattr(mod_conf, 'option_1', None)
+        self.option_2 = getattr(mod_conf, 'option_2', None)
+        self.option_3 = getattr(mod_conf, 'option_3', None)
+        logger.info("configuration, %s, %s, %s", self.option_1, self.option_2, self.option_3)
 
     def init(self):
         """
         This function initializes the module instance. If False is returned, the modules manager
-        will peiodically retry an to initialize the module.
+        will periodically retry to initialize the module.
         If an exception is raised, the module will be definitely considered as dead :/
 
+        This function must be present and return True for Alignak to consider the module as loaded
+        and fully functional.
+
         :return: True if initialization is ok, else False
         """
+        logger.info("Test - Example in %s", inspect.stack()[0][3])
         logger.info("Initialization of the example module")
         return True
 
+    # ----------
     # Common functions
+    # ----------
     def do_loop_turn(self):
         """This function is called/used when you need a module with
         a loop function (and use the parameter 'external': True)
@@ -105,19 +111,25 @@ def do_loop_turn(self):
         logger.info("In loop")
         time.sleep(1)
 
-    def hook_tick(self):
+    def hook_tick(self, daemon):
         """This function is called on each daemon 'tick'"""
-        logger.info("In hook tick")
-
-    # Arbiter specific functions
-    ## In execution order
-    def hook_load_retention(self):
-        """This function is called after retention loading"""
-        logger.info("In hook load retention")
-
-    def hook_read_configuration(self):
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    # Redefined in scheduler
+    # def hook_save_retention(self):
+    #     """This function is called to save data - really useful?"""
+    #     logger.info("Test - Example in %s", inspect.stack()[0][3])
+    #
+    # def hook_load_retention(self):
+    #     """This function is called to restore data - really useful?"""
+    #     logger.info("Test - Example in %s", inspect.stack()[0][3])
+
+    # ----------
+    # Arbiter specific functions (In execution order)
+    # ----------
+    def hook_read_configuration(self, daemon):
         """This function is called after conf file reading"""
-        logger.info("In hook read config")
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
 
     def get_objects(self):
         """This function must return a list of config
@@ -125,7 +137,7 @@ def get_objects(self):
 
         This is useful when your module imports objects from an external database
         """
-        logger.info("Ask me for objects to return")
+        logger.info("Test - Example in %s", inspect.stack()[0][3])
 
         r = {'hosts': []}
 
@@ -137,115 +149,151 @@ def get_objects(self):
 
         r['hosts'].append({
             'host_name': "module_host_1",
-            'use': 'linux-server',
             'address': 'localhost'
         })
 
         logger.info("Returning hosts objects to the Arbiter: %s", str(r))
         return r
 
-    def hook_early_configuration(self):
+    def hook_early_configuration(self, daemon):
         """This function is called after getting all config objects"""
-        logger.info("In hook early config")
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
 
-    def hook_late_configuration(self, conf):
+    def hook_late_configuration(self, daemon):
         """This function is called after configuration compilation
         This is the last step of configuration reading
         """
-        logger.info("In hook late config")
-
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
 
+    # ----------
     # scheduler only module
-    def update_retention_objects(self):
-        pass
-    def load_retention_objects(self):
-        pass
-    def hook_save_retention(self, scheduler):
-        logger.info("Dummy in save retention")
-    def hook_load_retention(self, scheduler):
-        logger.info("Dummy in load retention")
-    def hook_get_new_actions(self, scheduler):
-        logger.info("Dummy in get new actions")
-    def hook_pre_scheduler_mod_start(self, scheduler):
-        logger.info("Dummy in hook pre-scheduler")
-    def hook_scheduler_tick(self, scheduler):
-        logger.info("Dummy in hook scheduler tick")
-    def hook_tick(self, scheduler):
-        logger.info("Dummy in hook tick")
-    def do_loop_turn(self, scheduler):
-        logger.info("Dummy in loop turn")
-        time.sleep(0.1)
+    # ----------
+    def update_retention_objects(self, daemon):
+        """ Update retention date """
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    def load_retention_objects(self, daemon):
+        """ Self daemon objects retention - avoid using this! """
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    def hook_load_retention(self, daemon):
+        """This function is called by the daemon to restore the objects live state """
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    def hook_save_retention(self, daemon):
+        """This function is called before daemon exit to save the objects live state """
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    def hook_get_new_actions(self, daemon):
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
 
+    def hook_pre_scheduler_mod_start(self, daemon):
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
 
+    def hook_scheduler_tick(self, daemon):
+        logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    # Already defined
+    # def hook_tick(self, daemon):
+    #     logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    # Already defined
+    # def do_loop_turn(self, daemon):
+    #     logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    # ----------
     # Broker only module
-    def load_retention(self):
-        logger.info("Dummy in load retention")
-    def hook_tick(self):
-        logger.info("Dummy in hook tick")
-    def main(self):
-        pass
-    def hook_pre_scheduler_mod_start(self):
-        logger.info("Dummy in hook pre-scheduler")
-    def do_loop_turn(self):
-        logger.info("Dummy in loop turn")
-        time.sleep(0.1)
+    # ----------
+    # Already defined
+    # def hook_tick(self):
+    #     logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
+
+    # Already defined
+    # def do_loop_turn(self, daemon):
+    #     logger.info("Test - Example in %s for daemon: %s", inspect.stack()[0][3], daemon)
 
-    ## managing all broks
+    # managing all broks
     def manage_brok(self, brok):
+        """ This function is called as soon as a brok is received """
+        logger.info("Test - Example in %s, got brok type: %s", inspect.stack()[0][3], brok.type)
+
+    """
+        Broks types:
+        - log
+        - monitoring_log
+
+        - notification_raise
+        - downtime_raise
+        - initial_host_status, initial_service_status, initial_contact_status
+        - initial_broks_done
+
+        - update_host_status, update_service_status, initial_contact_status
+        - host_check_result, service_check_result
+        - host_next_schedule, service_next_schedule
+        - host_snapshot, service_snapshot
+        - unknown_host_check_result, unknown_service_check_result
+
+        - program_status
+        - clean_all_my_instance_id
+
+        - new_conf
+    """
+    # managing one specific type of brok
+    def manage_log_brok(self, brok):
+        """Deprecated ..."""
+        pass
+
+    def manage_monitoring_log_brok(self, brok):
         pass
 
-    brok_types = [
-        "arbiter", "brok", "broker", "businessimpactmodulation",
-        "check", "checkmodulation", "CommandCall", "contact",
-        "contactgroup", "escalation", "eventhandler",
-        "externalcommand", "host", "hostdependency",
-        "hostescalation", "hostextinfo", "hostgroup",
-        "macromodulation", "macroresolver", "message", "module",
-        "notification", "notificationway", "command", "config",
-        "servicedependency", "pack", "poller", "reactionner",
-        "realm", "receiver", "resultmodulation", "scheduler",
-        "service", "serviceescalation", "serviceextinfo",
-        "servicegroup", "timeperiod", "trigger",
-    ]
-
-    ## managing one type of brok
     def manage_clean_all_my_instance_id_brok(self, brok):
         pass
+
     def manage_downtime_raise_brok(self, brok):
         pass
+
     def manage_initial_broks_done_brok(self, brok):
         pass
+
     def manage_notification_raise_brok(self, brok):
         pass
+
     def manage_program_status_brok(self, brok):
         pass
-    def  manage_unknown_host_check_result_brok(self, broker):
+
+    def manage_unknown_host_check_result_brok(self, brok):
         pass
-    def  manage_unknown_service_check_result_brok(self, broker):
+
+    def manage_unknown_service_check_result_brok(self, brok):
         pass
 
     def manage_initial_host_status_brok(self, brok):
         pass
+
     def manage_initial_service_status_brok(self, brok):
         pass
 
     def manage_host_check_result_brok(self, brok):
         pass
+
     def manage_service_check_result_brok(self, brok):
         pass
 
     def manage_host_next_schedule_brok(self, brok):
         pass
+
     def manage_service_next_schedule_brok(self, brok):
         pass
 
     def manage_host_snapshot_brok(self, brok):
         pass
+
     def manage_service_snapshot_brok(self, brok):
         pass
 
     def manage_update_host_status_brok(self, brok):
         pass
+
     def manage_update_service_status_brok(self, brok):
         pass
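This hunk does not show how the module is exposed to Alignak's modules manager; conventionally (in Alignak/Shinken style modules) that is done with a module-level properties dictionary and a get_instance() factory. The sketch below illustrates that convention under that assumption; keys and values are not taken from the patch:

    # Illustrative sketch only - not shown in this patch.
    # The modules manager imports the python_name package, reads `properties`
    # and calls get_instance() with the module configuration object.
    properties = {
        'daemons': ['arbiter', 'broker', 'scheduler', 'poller', 'receiver'],
        'type': 'example',
        'external': False,
    }

    def get_instance(mod_conf):
        """Called by the modules manager to build an instance of this module"""
        return Example(mod_conf)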
 
diff --git a/setup.py b/setup.py
index afaafae..0938757 100755
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ def run(self):
 
 # Build list of all installable package files
 (data_files, to_be_parsed_files, to_be_installed_files) = get_files(
-    alignak_cfg, manifest["__pkg_name__"], manifest["__module_type__"], module=True
+    alignak_cfg, manifest["__pkg_name__"], manifest["__module_types__"], module=True
 )
 
 setup(
@@ -71,7 +71,7 @@ def run(self):
     # Metadata for PyPI
     author=manifest["__author__"],
     author_email=manifest["__author_email__"],
-    keywords="alignak monitoring module " + manifest["__module_type__"],
+    keywords="alignak monitoring module " + manifest["__module_types__"],
     url=manifest["__url__"],
     license=manifest["__license__"],
     description=manifest["__description__"],
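setup.py now expects a __module_types__ key (instead of __module_type__) in the parsed manifest, presumably read from version.py, which is not part of this patch. The sketch below lists only the keys referenced above, with placeholder values, to show the expected shape:

    # version.py (sketch) - keys consumed by setup.py in this patch; all values are placeholders.
    __pkg_name__ = u"alignak_module_example"
    __module_types__ = u"example"            # was __module_type__ before this change
    __author__ = u"..."
    __author_email__ = u"..."
    __url__ = u"https://github.com/Alignak-monitoring/alignak-module-example"
    __license__ = u"GNU Affero General Public License, version 3"
    __description__ = u"Alignak example module"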
diff --git a/test/alignak_test.py b/test/alignak_test.py
new file mode 100644
index 0000000..4cbe066
--- /dev/null
+++ b/test/alignak_test.py
@@ -0,0 +1,779 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015-2016: Alignak team, see AUTHORS.txt file for contributors
+#
+# This file is part of Alignak.
+#
+# Alignak is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Alignak is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Alignak.  If not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file provides the base test class and helpers used by the module tests.
+#
+
+import sys
+from sys import __stdout__
+from functools import partial
+
+import time
+import datetime
+import os
+import string
+import re
+import random
+import copy
+import locale
+import socket
+
+import unittest2 as unittest
+
+import logging
+from logging import Handler
+
+import alignak
+from alignak.log import DEFAULT_FORMATTER_NAMED, ROOT_LOGGER_NAME
+from alignak.objects.config import Config
+from alignak.objects.command import Command
+from alignak.objects.module import Module
+
+from alignak.dispatcher import Dispatcher
+from alignak.scheduler import Scheduler
+from alignak.macroresolver import MacroResolver
+from alignak.external_command import ExternalCommandManager, ExternalCommand
+from alignak.check import Check
+from alignak.message import Message
+from alignak.objects.arbiterlink import ArbiterLink
+from alignak.objects.schedulerlink import SchedulerLink
+from alignak.objects.pollerlink import PollerLink
+from alignak.objects.reactionnerlink import ReactionnerLink
+from alignak.objects.brokerlink import BrokerLink
+from alignak.objects.satellitelink import SatelliteLink
+from alignak.notification import Notification
+from alignak.modulesmanager import ModulesManager
+from alignak.basemodule import BaseModule
+
+from alignak.brok import Brok
+from alignak.misc.common import DICT_MODATTR
+
+from alignak.daemons.schedulerdaemon import Alignak
+from alignak.daemons.brokerdaemon import Broker
+from alignak.daemons.arbiterdaemon import Arbiter
+from alignak.daemons.receiverdaemon import Receiver
+from logging import ERROR
+
+from alignak_tst_utils import safe_print
+
+# Modules are, by default, in the ../modules directory
+myself = os.path.abspath(__file__)
+
+
+#############################################################################
+# We overwrite the functions time() and sleep()
+# This way we can modify sleep() so that it returns immediately, although
+# for a following time() call it looks like there was actually a delay.
+# This massively speeds up the tests.
+
+
+class TimeHacker(object):
+
+    def __init__(self):
+        self.my_offset = 0
+        self.my_starttime = time.time()
+        self.my_oldtime = time.time
+        self.original_time_time = time.time
+        self.original_time_sleep = time.sleep
+        self.in_real_time = True
+
+    def my_time_time(self):
+        return self.my_oldtime() + self.my_offset
+
+    def my_time_sleep(self, delay):
+        self.my_offset += delay
+
+    def time_warp(self, duration):
+        self.my_offset += duration
+
+    def set_my_time(self):
+        if self.in_real_time:
+            time.time = self.my_time_time
+            time.sleep = self.my_time_sleep
+            self.in_real_time = False
+
+# If external processes or time stamps for files are involved, we must
+# revert the fake timing routines, because these externals cannot be fooled.
+# They get their times from the operating system.
+    def set_real_time(self):
+        if not self.in_real_time:
+            time.time = self.original_time_time
+            time.sleep = self.original_time_sleep
+            self.in_real_time = True
+
+
+class Pluginconf(object):
+    pass
+
+
+class CollectorHandler(Handler):
+    """
+    This log handler collects all emitted logs.
+
+    Used for test purposes (assertions)
+    """
+
+    def __init__(self):
+        Handler.__init__(self, logging.DEBUG)
+        self.collector = []
+
+    def emit(self, record):
+        try:
+            msg = self.format(record)
+            self.collector.append(msg)
+        except TypeError:
+            self.handleError(record)
+
+
+class AlignakTest(unittest.TestCase):
+
+    time_hacker = TimeHacker()
+    maxDiff = None
+
+    if sys.version_info < (2, 7):
+        def assertRegex(self, *args, **kwargs):
+            return self.assertRegexpMatches(*args, **kwargs)
+
+    def setup_with_file(self, configuration_file):
+        """
+        Load alignak with defined configuration file
+
+        If the configuration loading fails, a SystemExit exception is raised to the caller.
+
+        The conf_is_correct property indicates if the configuration loading succeeded or failed.
+
+        The configuration errors property contains a list of the error messages that are normally
+        logged as ERROR by the arbiter.
+
+        @verified
+
+        :param configuration_file: path + file name of the main configuration file
+        :type configuration_file: str
+        :return: None
+        """
+        self.broks = {}
+        self.schedulers = {}
+        self.brokers = {}
+        self.pollers = {}
+        self.receivers = {}
+        self.reactionners = {}
+        self.arbiter = None
+        self.conf_is_correct = False
+        self.configuration_warnings = []
+        self.configuration_errors = []
+        self.logger = logging.getLogger("alignak")
+
+        # Add collector for test purpose.
+        collector_h = CollectorHandler()
+        collector_h.setFormatter(DEFAULT_FORMATTER_NAMED)
+        self.logger.addHandler(collector_h)
+
+        # Initialize the Arbiter with no daemon configuration file
+        self.arbiter = Arbiter(None, [configuration_file], False, False, False, False,
+                              '/tmp/arbiter.log', 'arbiter-master')
+
+        try:
+            # The following is copied from setup_alignak_logger.
+            # The only difference is that we keep the logger at INFO level to gather messages.
+            # This is needed to assert later on logs we received.
+            self.logger.setLevel(logging.INFO)
+            # Force the debug level if the daemon is said to start with such level
+            if self.arbiter.debug:
+                self.logger.setLevel(logging.DEBUG)
+
+            # Log will be broks
+            for line in self.arbiter.get_header():
+                self.logger.info(line)
+
+            self.arbiter.load_monitoring_config_file()
+
+            # If this assertion does not match, then there is a bug in the arbiter :)
+            self.assertTrue(self.arbiter.conf.conf_is_correct)
+            self.conf_is_correct = True
+            self.configuration_warnings = self.arbiter.conf.configuration_warnings
+            self.configuration_errors = self.arbiter.conf.configuration_errors
+        except SystemExit:
+            self.configuration_warnings = self.arbiter.conf.configuration_warnings
+            print("Configuration warnings:")
+            for msg in self.configuration_warnings:
+                print(" - %s" % msg)
+            self.configuration_errors = self.arbiter.conf.configuration_errors
+            print("Configuration errors:")
+            for msg in self.configuration_errors:
+                print(" - %s" % msg)
+            raise
+
+        for arb in self.arbiter.conf.arbiters:
+            if arb.get_name() == self.arbiter.config_name:
+                self.arbiter.myself = arb
+        self.arbiter.dispatcher = Dispatcher(self.arbiter.conf, self.arbiter.myself)
+        self.arbiter.dispatcher.prepare_dispatch()
+
+        # Build schedulers dictionary with the schedulers involved in the configuration
+        for scheduler in self.arbiter.dispatcher.schedulers:
+            sched = Alignak([], False, False, True, '/tmp/scheduler.log')
+            sched.load_modules_manager()
+            sched.new_conf = scheduler.conf_package
+            if sched.new_conf:
+                sched.setup_new_conf()
+            self.schedulers[scheduler.scheduler_name] = sched
+
+        # Build pollers dictionary with the pollers involved in the configuration
+        for poller in self.arbiter.dispatcher.pollers:
+            self.pollers[poller.poller_name] = poller
+
+        # Build receivers dictionary with the receivers involved in the configuration
+        for receiver in self.arbiter.dispatcher.receivers:
+            self.receivers[receiver.receiver_name] = receiver
+
+        # Build reactionners dictionary with the reactionners involved in the configuration
+        for reactionner in self.arbiter.dispatcher.reactionners:
+            self.reactionners[reactionner.reactionner_name] = reactionner
+
+        # Build brokers dictionary with the brokers involved in the configuration
+        for broker in self.arbiter.dispatcher.brokers:
+            self.brokers[broker.broker_name] = broker
+
+        # No current need of such a dictionary for the other daemon types...
+        # but it may be easily completed!
+
+    def add(self, b):
+        if isinstance(b, Brok):
+            self.broks[b.uuid] = b
+            return
+        if isinstance(b, ExternalCommand):
+            self.schedulers['scheduler-master'].run_external_command(b.cmd_line)
+
+    def fake_check(self, ref, exit_status, output="OK"):
+        # print "fake", ref
+        now = time.time()
+        check = ref.schedule(self.schedulers['scheduler-master'].sched.hosts, self.schedulers['scheduler-master'].sched.services, self.schedulers['scheduler-master'].sched.timeperiods,
+                             self.schedulers['scheduler-master'].sched.macromodulations, self.schedulers['scheduler-master'].sched.checkmodulations,
+                             self.schedulers['scheduler-master'].sched.checks, force=True)
+        # now checks are scheduled and we get them in
+        # the action queue
+        # check = ref.actions.pop()
+        self.schedulers['scheduler-master'].sched.add(check)  # check is now in sched.checks[]
+        # check = self.schedulers['scheduler-master'].sched.checks[ref.checks_in_progress[0]]
+
+        # Allows to force check scheduling without setting its status nor
+        # output. Useful for manual business rules rescheduling, for instance.
+        if exit_status is None:
+            return
+
+        # fake execution
+        check.check_time = now
+
+        # and lie about when we will launch it because
+        # if not, the schedule call for ref
+        # will not really reschedule it because there
+        # is a valid value in the future
+        ref.next_chk = now - 0.5
+
+        check.get_outputs(output, 9000)
+        check.exit_status = exit_status
+        check.execution_time = 0.001
+        check.status = 'waitconsume'
+        self.schedulers['scheduler-master'].sched.waiting_results.put(check)
+
+    def scheduler_loop(self, count, items, mysched=None):
+        """
+        Manage scheduler checks
+
+        @verified
+
+        :param count: number of checks to pass
+        :type count: int
+        :param items: list of lists [[object, exit_status, output]]
+        :type items: list
+        :param mysched: The scheduler
+        :type mysched: None | object
+        :return: None
+        """
+        if mysched is None:
+            mysched = self.schedulers['scheduler-master']
+
+        macroresolver = MacroResolver()
+        macroresolver.init(mysched.conf)
+
+        for num in range(count):
+            for item in items:
+                (obj, exit_status, output) = item
+                if len(obj.checks_in_progress) == 0:
+                    for i in mysched.sched.recurrent_works:
+                        (name, fun, nb_ticks) = mysched.sched.recurrent_works[i]
+                        if nb_ticks == 1:
+                            fun()
+                self.assertGreater(len(obj.checks_in_progress), 0)
+                chk = mysched.sched.checks[obj.checks_in_progress[0]]
+                chk.set_type_active()
+                chk.output = output
+                chk.exit_status = exit_status
+                mysched.sched.waiting_results.put(chk)
+
+            for i in mysched.sched.recurrent_works:
+                (name, fun, nb_ticks) = mysched.sched.recurrent_works[i]
+                if nb_ticks == 1:
+                    fun()
+
+    def external_command_loop(self):
+        """
+        Execute the scheduler actions for external commands.
+
+        @verified
+        :return:
+        """
+        for i in self.schedulers['scheduler-master'].sched.recurrent_works:
+            (name, fun, nb_ticks) = self.schedulers['scheduler-master'].sched.recurrent_works[i]
+            if nb_ticks == 1:
+                fun()
+        self.assert_no_log_match("External command Brok could not be sent to any daemon!")
+
+    def worker_loop(self, verbose=True):
+        self.schedulers['scheduler-master'].sched.delete_zombie_checks()
+        self.schedulers['scheduler-master'].sched.delete_zombie_actions()
+        checks = self.schedulers['scheduler-master'].sched.get_to_run_checks(True, False, worker_name='tester')
+        actions = self.schedulers['scheduler-master'].sched.get_to_run_checks(False, True, worker_name='tester')
+        # print "------------ worker loop checks ----------------"
+        # print checks
+        # print "------------ worker loop actions ----------------"
+        if verbose is True:
+            self.show_actions()
+        # print "------------ worker loop new ----------------"
+        for a in actions:
+            a.status = 'inpoller'
+            a.check_time = time.time()
+            a.exit_status = 0
+            self.schedulers['scheduler-master'].sched.put_results(a)
+        if verbose is True:
+            self.show_actions()
+        # print "------------ worker loop end ----------------"
+
+    def show_logs(self, scheduler=False):
+        """
+        Show logs. Get logs collected by the collector handler and print them
+
+        @verified
+        :param scheduler:
+        :return:
+        """
+        print "--- logs <<<----------------------------------"
+        collector_h = [hand for hand in self.logger.handlers
+                       if isinstance(hand, CollectorHandler)][0]
+        for log in collector_h.collector:
+            safe_print(log)
+
+        print "--- logs >>>----------------------------------"
+
+    def show_actions(self):
+        print "--- actions <<<----------------------------------"
+        actions = sorted(self.schedulers['scheduler-master'].sched.actions.values(), key=lambda x: x.creation_time)
+        for a in actions:
+            if a.is_a == 'notification':
+                item = self.schedulers['scheduler-master'].sched.find_item_by_id(a.ref)
+                if item.my_type == "host":
+                    ref = "host: %s" % item.get_name()
+                else:
+                    hst = self.schedulers['scheduler-master'].sched.find_item_by_id(item.host)
+                    ref = "host: %s svc: %s" % (hst.get_name(), item.get_name())
+                print "NOTIFICATION %s %s %s %s %s" % (a.uuid, ref, a.type,
+                                                       time.asctime(time.localtime(a.t_to_go)),
+                                                       a.status)
+            elif a.is_a == 'eventhandler':
+                print "EVENTHANDLER:", a
+        print "--- actions >>>----------------------------------"
+
+    def show_checks(self):
+        """
+        Show checks from the scheduler
+        :return:
+        """
+        print "--- checks <<<--------------------------------"
+
+        for check in self.schedulers['scheduler-master'].sched.checks.values():
+            print("- %s" % check)
+        print "--- checks >>>--------------------------------"
+
+    def show_and_clear_logs(self):
+        """
+        Prints and then deletes the current logs stored in the log collector
+
+        @verified
+        :return:
+        """
+        self.show_logs()
+        self.clear_logs()
+
+    def show_and_clear_actions(self):
+        self.show_actions()
+        self.clear_actions()
+
+    def count_logs(self):
+        """
+        Count the log lines stored by the logs collector handler.
+
+        @verified
+        :return:
+        """
+        collector_h = [hand for hand in self.logger.handlers
+                       if isinstance(hand, CollectorHandler)][0]
+        return len(collector_h.collector)
+
+    def count_actions(self):
+        """
+        Count the actions in the scheduler's actions.
+
+        @verified
+        :return:
+        """
+        return len(self.schedulers['scheduler-master'].sched.actions.values())
+
+    def clear_logs(self):
+        """
+        Remove all the logs stored in the logs collector
+
+        @verified
+        :return:
+        """
+        collector_h = [hand for hand in self.logger.handlers
+                       if isinstance(hand, CollectorHandler)][0]
+        collector_h.collector = []
+
+    def clear_actions(self):
+        """
+        Clear the actions in the scheduler's actions.
+
+        @verified
+        :return:
+        """
+        self.schedulers['scheduler-master'].sched.actions = {}
+
+    def assert_actions_count(self, number):
+        """
+        Check the number of actions
+
+        @verified
+
+        :param number: number of actions we must have
+        :type number: int
+        :return: None
+        """
+        print("Actions: %s" % self.schedulers['scheduler-master'].sched.actions)
+        actions = sorted(self.schedulers['scheduler-master'].sched.actions.values(), key=lambda x: x.creation_time)
+        self.assertEqual(number, len(self.schedulers['scheduler-master'].sched.actions),
+                         "Not found expected number of actions:\nactions_logs=[[[\n%s\n]]]" %
+                         ('\n'.join('\t%s = creation: %s, is_a: %s, type: %s, status: %s, planned: %s, '
+                                    'command: %s' %
+                                    (idx, b.creation_time, b.is_a, b.type, b.status, b.t_to_go, b.command)
+                                    for idx, b in enumerate(actions))))
+
+    def assert_actions_match(self, index, pattern, field):
+        """
+        Check if pattern verified in field(property) name of the action with index in action list
+
+        @verified
+
+        :param index: index number of actions list
+        :type index: int
+        :param pattern: pattern to verify is in the action
+        :type pattern: str
+        :param field: name of the field (property) of the action
+        :type field: str
+        :return: None
+        """
+        regex = re.compile(pattern)
+        actions = sorted(self.schedulers['scheduler-master'].sched.actions.values(), key=lambda x: x.creation_time)
+        myaction = actions[index]
+        self.assertTrue(regex.search(getattr(myaction, field)),
+                        "Not found a matching patternin actions:\nindex=%s field=%s pattern=%r\n"
+                        "action_line=creation: %s, is_a: %s, type: %s, status: %s, planned: %s, "
+                        "command: %s" % (
+                            index, field, pattern, myaction.creation_time, myaction.is_a,
+                            myaction.type, myaction.status, myaction.t_to_go, myaction.command))
+
+    def assert_log_match(self, pattern, index=None):
+        """
+        Search if the log with the index number has the pattern in the Arbiter logs.
+
+        If index is None, then all the collected logs are searched for the pattern
+
+        Logs numbering starts from 0 (the oldest stored log line)
+
+        This function asserts on the search result: if no log matches the search
+        criteria, an assertion error is raised and the test stops on error.
+
+        :param pattern: string to search in log
+        :type pattern: str
+        :param index: index number
+        :type index: int
+        :return: None
+        """
+        self.assertIsNotNone(pattern, "Searched pattern can not be None!")
+
+        collector_h = [hand for hand in self.logger.handlers
+                       if isinstance(hand, CollectorHandler)][0]
+
+        regex = re.compile(pattern)
+        log_num = 0
+
+        found = False
+        for log in collector_h.collector:
+            if index is None:
+                if regex.search(log):
+                    found = True
+                    break
+            elif index == log_num:
+                if regex.search(log):
+                    found = True
+                    break
+            log_num += 1
+
+        self.assertTrue(found,
+                        "Not found a matching log line in logs:\nindex=%s pattern=%r\n"
+                        "logs=[[[\n%s\n]]]" % (
+                            index, pattern, '\n'.join('\t%s=%s' % (idx, b.strip())
+                                                      for idx, b in enumerate(collector_h.collector)
+                                                      )
+                            )
+                        )
+
+    def assert_checks_count(self, number):
+        """
+        Check the number of checks
+
+        @verified
+
+        :param number: number of checks we must have
+        :type number: int
+        :return: None
+        """
+        checks = sorted(self.schedulers['scheduler-master'].sched.checks.values(), key=lambda x: x.creation_time)
+        self.assertEqual(number, len(checks),
+                         "Not found expected number of checks:\nchecks_logs=[[[\n%s\n]]]" %
+                         ('\n'.join('\t%s = creation: %s, is_a: %s, type: %s, status: %s, planned: %s, '
+                                    'command: %s' %
+                                    (idx, b.creation_time, b.is_a, b.type, b.status, b.t_to_go, b.command)
+                                    for idx, b in enumerate(checks))))
+
+    def assert_checks_match(self, index, pattern, field):
+        """
+        Check if pattern verified in field(property) name of the check with index in check list
+
+        @verified
+
+        :param index: index number of checks list
+        :type index: int
+        :param pattern: pattern to verify is in the check
+        :type pattern: str
+        :param field: name of the field (property) of the check
+        :type field: str
+        :return: None
+        """
+        regex = re.compile(pattern)
+        checks = sorted(self.schedulers['scheduler-master'].sched.checks.values(), key=lambda x: x.creation_time)
+        mycheck = checks[index]
+        self.assertTrue(regex.search(getattr(mycheck, field)),
+                        "Not found a matching pattern in checks:\nindex=%s field=%s pattern=%r\n"
+                        "check_line=creation: %s, is_a: %s, type: %s, status: %s, planned: %s, "
+                        "command: %s" % (
+                            index, field, pattern, mycheck.creation_time, mycheck.is_a,
+                            mycheck.type, mycheck.status, mycheck.t_to_go, mycheck.command))
+
+    def _any_check_match(self, pattern, field, assert_not):
+        """
+        Search if any check matches the requested pattern
+
+        @verified
+        :param pattern:
+        :param field: field (property) to search with the pattern
+        :param assert_not:
+        :return:
+        """
+        regex = re.compile(pattern)
+        checks = sorted(self.schedulers['scheduler-master'].sched.checks.values(), key=lambda x: x.creation_time)
+        for check in checks:
+            if re.search(regex, getattr(check, field)):
+                self.assertTrue(not assert_not,
+                                "Found check:\nfield=%s pattern=%r\n"
+                                "check_line=creation: %s, is_a: %s, type: %s, status: %s, "
+                                "planned: %s, command: %s" % (
+                                    field, pattern, check.creation_time, check.is_a,
+                                    check.type, check.status, check.t_to_go, check.command)
+                                )
+                return
+        self.assertTrue(assert_not, "No matching check found:\n"
+                                    "pattern = %r\n" "checks = %r" % (pattern, checks))
+
+    def assert_any_check_match(self, pattern, field):
+        """
+        Assert if any check matches the pattern
+
+        @verified
+        :param pattern:
+        :param field: field (property) to search with the pattern
+        :return:
+        """
+        self._any_check_match(pattern, field, assert_not=False)
+
+    def assert_no_check_match(self, pattern, field):
+        """
+        Assert if no check matches the pattern
+
+        @verified
+        :param pattern:
+        :param field: field (property) to search with the pattern
+        :return:
+        """
+        self._any_check_match(pattern, field, assert_not=True)
+
+    def _any_log_match(self, pattern, assert_not):
+        """
+        Search if any log line collected by the logs collector handler
+        matches the requested pattern.
+
+        @verified
+        :param pattern:
+        :param assert_not:
+        :return:
+        """
+        regex = re.compile(pattern)
+
+        collector_h = [hand for hand in self.logger.handlers
+                       if isinstance(hand, CollectorHandler)][0]
+
+        for log in collector_h.collector:
+            if re.search(regex, log):
+                self.assertTrue(not assert_not,
+                                "Found matching log line:\n"
+                                "pattern = %r\nbrok log = %r" % (pattern, log))
+                return
+
+        self.assertTrue(assert_not, "No matching log line found:\n"
+                                    "pattern = %r\n" "logs broks = %r" % (pattern,
+                                                                          collector_h.collector))
+
+    def assert_any_log_match(self, pattern):
+        """
+        Assert that at least one collected log line matches the pattern
+
+        @verified
+        :param pattern:
+        :return:
+        """
+        self._any_log_match(pattern, assert_not=False)
+
+    def assert_no_log_match(self, pattern):
+        """
+        Assert that no collected log line matches the pattern
+
+        @verified
+        :param pattern:
+        :return:
+        """
+        self._any_log_match(pattern, assert_not=True)
+
+    def get_log_match(self, pattern):
+        regex = re.compile(pattern)
+        res = []
+        collector_h = [hand for hand in self.logger.handlers
+                       if isinstance(hand, CollectorHandler)][0]
+
+        for log in collector_h.collector:
+            if re.search(regex, log):
+                res.append(log)
+        return res
+
+    def print_header(self):
+        print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
+        print "#" + string.center(self.id(), 78) + "#"
+        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"
+
+    def xtest_conf_is_correct(self):
+        self.print_header()
+        self.assertTrue(self.conf.conf_is_correct)
+
+    def show_configuration_logs(self):
+        """
+        Prints the configuration logs
+
+        @verified
+        :return:
+        """
+        print("Configuration warnings:")
+        for msg in self.configuration_warnings:
+            print(" - %s" % msg)
+        print("Configuration errors:")
+        for msg in self.configuration_errors:
+            print(" - %s" % msg)
+
+    def _any_cfg_log_match(self, pattern, assert_not):
+        """
+        Search a pattern in configuration log (warning and error)
+
+        @verified
+        :param pattern:
+        :return:
+        """
+        regex = re.compile(pattern)
+
+        cfg_logs = self.configuration_warnings + self.configuration_errors
+
+        for log in cfg_logs:
+            if re.search(regex, log):
+                self.assertTrue(not assert_not,
+                                "Found matching log line:\n"
+                                "pattern = %r\nlog = %r" % (pattern, log))
+                return
+
+        self.assertTrue(assert_not, "No matching log line found:\n"
+                                    "pattern = %r\n" "logs = %r" % (pattern, cfg_logs))
+
+    def assert_any_cfg_log_match(self, pattern):
+        """
+        Assert if any configuration log matches the pattern
+
+        @verified
+        :param pattern:
+        :return:
+        """
+        self._any_cfg_log_match(pattern, assert_not=False)
+
+    def assert_no_cfg_log_match(self, pattern):
+        """
+        Assert if no configuration log matches the pattern
+
+        @verified
+        :param pattern:
+        :return:
+        """
+        self._any_cfg_log_match(pattern, assert_not=True)
+
+
+ShinkenTest = AlignakTest
+
+# Time hacking for every test!
+time_hacker = AlignakTest.time_hacker
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
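The test_module.py file that .travis.yml runs is not included in this excerpt. As an illustration of how the framework above is intended to be used, here is a minimal hypothetical test case (class name and assertion patterns are examples; the configuration file is the cfg/cfg_default.cfg added below):

    # Hypothetical usage sketch of the AlignakTest base class (not part of the patch)
    from alignak_test import AlignakTest


    class TestExampleModule(AlignakTest):

        def test_configuration_is_correct(self):
            self.print_header()
            # Load the test configuration that declares the example module
            self.setup_with_file('cfg/cfg_default.cfg')
            self.assertTrue(self.conf_is_correct)
            # Helpers defined above: dump and check the configuration logs
            self.show_configuration_logs()
            self.assert_no_cfg_log_match('unknown module')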
diff --git a/test/alignak_tst_utils.py b/test/alignak_tst_utils.py
new file mode 100644
index 0000000..f0630e4
--- /dev/null
+++ b/test/alignak_tst_utils.py
@@ -0,0 +1,79 @@
+"""
+Module (could be made a package eventually) containing misc
+little helper functions (without hidden side-effects or such things)
+used more specifically in the tests.
+"""
+
+import locale
+import socket
+import sys
+
+from sys import __stdout__
+
+
+if sys.version_info[:2] < (2, 7):
+    import unittest2 as unittest
+    from ordereddict import OrderedDict
+else:
+    import unittest
+    from collections import OrderedDict
+
+
+
+def get_free_port(on_ip='127.0.0.1'):
+    sock = socket.socket()
+    try:
+        sock.bind((on_ip, 0))
+        return sock.getsockname()[1]
+    finally:
+        sock.close()
+
+
+def guess_sys_stdout_encoding():
+    ''' Return the best guessed encoding to be used for printing on sys.stdout. '''
+    return (
+           getattr(sys.stdout, 'encoding', None)
+        or getattr(__stdout__, 'encoding', None)
+        or locale.getpreferredencoding()
+        or sys.getdefaultencoding()
+        or 'ascii'
+    )
+
+
+def safe_print(*args, **kw):
+    """" "print" args to sys.stdout,
+    If some of the args aren't unicode then convert them first to unicode,
+        using keyword argument 'in_encoding' if provided (else default to UTF8)
+        and replacing bad encoded bytes.
+    Write to stdout using 'out_encoding' if provided else best guessed encoding,
+        doing xmlcharrefreplace on errors.
+    """
+    in_bytes_encoding = kw.pop('in_encoding', 'UTF-8')
+    out_encoding = kw.pop('out_encoding', guess_sys_stdout_encoding())
+    if kw:
+        raise ValueError('unhandled named/keyword argument(s): %r' % kw)
+    #
+    make_in_data_gen = lambda: ( a if isinstance(a, unicode)
+                                else
+                            unicode(str(a), in_bytes_encoding, 'replace')
+                        for a in args )
+
+    possible_codings = ( out_encoding, )
+    if out_encoding != 'ascii':
+        possible_codings += ( 'ascii', )
+
+    for coding in possible_codings:
+        data = u' '.join(make_in_data_gen()).encode(coding, 'xmlcharrefreplace')
+        try:
+            sys.stdout.write(data)
+            break
+        except UnicodeError as err:
+            # there might still be some problem with the underlying sys.stdout.
+            # it might be a StringIO whose content could be decoded/encoded in this same process
+            # and have encode/decode errors because we could have guessed a bad encoding with it.
+            # in such case fallback on 'ascii'
+            if coding == 'ascii':
+                raise
+            sys.stderr.write('Error on write to sys.stdout with %s encoding: err=%s\nTrying with ascii' % (
+                coding, err))
+    sys.stdout.write(b'\n')
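For reference, a small hypothetical illustration of how these helpers are typically used in the tests (not part of the patch):

    # Sketch: reserve an ephemeral port for a test daemon and print safely.
    from alignak_tst_utils import get_free_port, safe_print

    port = get_free_port()  # binds to port 0 on 127.0.0.1 and returns the port chosen by the OS
    safe_print(u"test daemon will listen on port", port)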
diff --git a/test/cfg/cfg_default.cfg b/test/cfg/cfg_default.cfg
new file mode 100644
index 0000000..98a29e5
--- /dev/null
+++ b/test/cfg/cfg_default.cfg
@@ -0,0 +1,3 @@
+cfg_dir=default
+# Use this specific version for test
+cfg_file=mod-example.cfg
\ No newline at end of file
diff --git a/test/cfg/default/commands.cfg b/test/cfg/default/commands.cfg
new file mode 100644
index 0000000..c1924d6
--- /dev/null
+++ b/test/cfg/default/commands.cfg
@@ -0,0 +1,30 @@
+define command{
+    command_name    check-host-alive
+    command_line    $USER1$/test_hostcheck.pl --type=$ARG1$ --failchance=2% --previous-state=$HOSTSTATE$ --state-duration=$HOSTDURATIONSEC$ --hostname $HOSTNAME$
+}
+define command{
+    command_name    check-host-alive-parent
+    command_line    $USER1$/test_hostcheck.pl --type=$ARG1$ --failchance=2% --previous-state=$HOSTSTATE$ --state-duration=$HOSTDURATIONSEC$ --parent-state=$ARG2$ --hostname $HOSTNAME$
+}
+define command{
+    command_name    notify-host
+    #command_line    sleep 1 && /bin/true
+    command_line    $USER1$/notifier.pl --hostname $HOSTNAME$ --notificationtype $NOTIFICATIONTYPE$ --hoststate $HOSTSTATE$ --hostoutput $HOSTOUTPUT$ --longdatetime $LONGDATETIME$ --hostattempt $HOSTATTEMPT$ --hoststatetype $HOSTSTATETYPE$
+}
+define command{
+    command_name    notify-service
+    command_line    $USER1$/notifier.pl --hostname $HOSTNAME$ --servicedesc $SERVICEDESC$ --notificationtype $NOTIFICATIONTYPE$ --servicestate $SERVICESTATE$ --serviceoutput $SERVICEOUTPUT$ --longdatetime $LONGDATETIME$ --serviceattempt $SERVICEATTEMPT$ --servicestatetype $SERVICESTATETYPE$
+    #command_line    sleep 1 && /bin/true
+}
+define command{
+    command_name    check_service
+    command_line    $USER1$/test_servicecheck.pl --type=$ARG1$ --failchance=5% --previous-state=$SERVICESTATE$ --state-duration=$SERVICEDURATIONSEC$ --total-critical-on-host=$TOTALHOSTSERVICESCRITICAL$ --total-warning-on-host=$TOTALHOSTSERVICESWARNING$ --hostname $HOSTNAME$ --servicedesc $SERVICEDESC$
+}
+define command{
+  command_name eventhandler
+  command_line    $USER1$/test_eventhandler.pl $SERVICESTATE$ $SERVICESTATETYPE$ $SERVICEATTEMPT$
+}
+define command{
+  command_name    special_macro
+  command_line	  $USER1$/nothing $ARG1$
+}
diff --git a/test/cfg/default/contacts.cfg b/test/cfg/default/contacts.cfg
new file mode 100644
index 0000000..25d0dcc
--- /dev/null
+++ b/test/cfg/default/contacts.cfg
@@ -0,0 +1,19 @@
+define contactgroup{
+    contactgroup_name       test_contact
+    alias                   test_contacts_alias
+    members                 test_contact
+}
+
+define contact{
+    contact_name                    test_contact
+    alias                           test_contact_alias
+    service_notification_period     24x7
+    host_notification_period        24x7
+    service_notification_options    w,u,c,r,f
+    host_notification_options       d,u,r,f,s
+    service_notification_commands   notify-service
+    host_notification_commands      notify-host
+    email                           nobody@localhost
+    can_submit_commands             1
+    contactgroups                   another_contact_test
+}
diff --git a/test/cfg/default/daemons/arbiter-master.cfg b/test/cfg/default/daemons/arbiter-master.cfg
new file mode 100644
index 0000000..5dffbc1
--- /dev/null
+++ b/test/cfg/default/daemons/arbiter-master.cfg
@@ -0,0 +1,43 @@
+#===============================================================================
+# ARBITER
+#===============================================================================
+# Description: The Arbiter is responsible for:
+# - Loading, manipulating and dispatching the configuration
+# - Validating the health of all other Alignak daemons
+# - Issuing global directives to Alignak daemons (kill, activate-spare, etc.)
+# https://alignak.readthedocs.org/en/latest/08_configobjects/arbiter.html
+#===============================================================================
+# IMPORTANT: If you use several arbiters you MUST set the host_name on each
+# server to its real DNS name ('hostname' command).
+#===============================================================================
+define arbiter {
+    arbiter_name            arbiter-master
+    #host_name              node1           ; CHANGE THIS if you have several Arbiters (like with a spare)
+    address                 127.0.0.1
+    port                    7770
+
+    ## Realm
+    #realm                   All
+
+    ## Modules
+    # Default: None
+    ## Interesting modules:
+    # - backend_arbiter     = get the monitored objects configuration from the Alignak backend
+    modules example
+
+    ## Optional parameters:
+    ## Uncomment these lines in a HA architecture so the master and slaves know
+    ## how long they may wait for each other.
+    #timeout                 3   ; Ping timeout
+    #data_timeout            120 ; Data send timeout
+    #max_check_attempts      3   ; If ping fails N or more, then the node is dead
+    #check_interval          60  ; Ping node every N seconds
+
+    # In a HA architecture this daemon can be a spare
+    spare                   0   ; 1 = is a spare, 0 = is not a spare
+
+    # Enable https or not
+    use_ssl	                0
+    # enable certificate/hostname check, will avoid man in the middle attacks
+    hard_ssl_name_check     0
+}
diff --git a/test/cfg/default/daemons/broker-master.cfg b/test/cfg/default/daemons/broker-master.cfg
new file mode 100644
index 0000000..15d5a16
--- /dev/null
+++ b/test/cfg/default/daemons/broker-master.cfg
@@ -0,0 +1,47 @@
+#===============================================================================
+# BROKER (S1_Broker)
+#===============================================================================
+# Description: The broker is responsible for:
+# - Exporting centralized logs of all Alignak daemon processes
+# - Exporting status data
+# - Exporting performance data
+# - Exposing Alignak APIs:
+#   - Status data
+#   - Performance data
+#   - Configuration data
+#   - Command interface
+# https://alignak.readthedocs.org/en/latest/08_configobjects/broker.html
+#===============================================================================
+define broker {
+    broker_name             broker-master
+    address                 127.0.0.1
+    port                    7772
+
+    ## Realm
+    #realm                   All
+
+    ## Modules
+    # Default: None
+    # Interesting modules that can be used:
+    # - backend_broker      = update the live state in the Alignak backend
+    modules example
+
+    ## Optional parameters:
+    timeout                 3   ; Ping timeout
+    data_timeout            120 ; Data send timeout
+    max_check_attempts      3   ; If ping fails N or more, then the node is dead
+    check_interval          60  ; Ping node every N seconds
+
+    # In a HA architecture this daemon can be a spare
+    spare                   0   ; 1 = is a spare, 0 = is not a spare
+
+    # Enable https or not
+    use_ssl	                0
+    # Enable certificate/hostname check to avoid man-in-the-middle attacks
+    hard_ssl_name_check     0
+
+    ## Advanced parameters:
+    manage_arbiters         1   ; Take data from Arbiter. There should be only one
+                                ; broker for the arbiter.
+    manage_sub_realms       0   ; Does it take jobs from schedulers of sub-Realms?
+}
diff --git a/test/cfg/default/daemons/poller-master.cfg b/test/cfg/default/daemons/poller-master.cfg
new file mode 100644
index 0000000..ba4b9d1
--- /dev/null
+++ b/test/cfg/default/daemons/poller-master.cfg
@@ -0,0 +1,57 @@
+#===============================================================================
+# POLLER (S1_Poller)
+#===============================================================================
+# Description: The poller is responsible for:
+# - Active data acquisition
+# - Local passive data acquisition
+# https://alignak.readthedocs.org/en/latest/08_configobjects/poller.html
+#===============================================================================
+define poller {
+    poller_name             poller-master
+    address                 127.0.0.1
+    port                    7771
+
+    ## Realm
+    #realm                   All
+
+    ## Modules
+    # Default: None
+    ## Interesting modules:
+    # - booster-nrpe        = Replaces the check_nrpe binary. Therefore it
+    #                       enhances performance when there are lots of NRPE
+    #                       calls.
+    # - named-pipe          = Allow the poller to read a nagios.cmd named pipe.
+    #                       This permits the use of distributed check_mk checks
+    #                       should you desire it.
+    # - snmp-booster        = Snmp bulk polling module
+    modules example
+
+    ## Optional parameters:
+    timeout                 3   ; Ping timeout
+    data_timeout            120 ; Data send timeout
+    max_check_attempts      3   ; If ping fails N or more, then the node is dead
+    check_interval          60  ; Ping node every N seconds
+
+    ## In a HA architecture this daemon can be a spare
+    spare                   0   ; 1 = is a spare, 0 = is not a spare
+
+    # Enable https or not
+    use_ssl	                0
+
+    # Enable certificate/hostname check to avoid man-in-the-middle attacks
+    hard_ssl_name_check     0
+
+    ## Advanced parameters:
+    manage_sub_realms       0   ; Does it take jobs from schedulers of sub-Realms?
+    min_workers             0   ; Starts with N processes (0 = 1 per CPU)
+    max_workers             0   ; No more than N processes (0 = 1 per CPU)
+    processes_by_worker     256 ; Each worker manages N checks
+    polling_interval        1   ; Get jobs from schedulers every N seconds
+
+    #passive                0   ; For DMZ monitoring, set to 1 so the connections
+                                ; will be from scheduler -> poller.
+
+    # Poller tags are the tags that the poller will manage. Use None as tag name to manage
+    # untagged checks
+    #poller_tags             None
+}
diff --git a/test/cfg/default/daemons/reactionner-master.cfg b/test/cfg/default/daemons/reactionner-master.cfg
new file mode 100644
index 0000000..4487dc2
--- /dev/null
+++ b/test/cfg/default/daemons/reactionner-master.cfg
@@ -0,0 +1,45 @@
+#===============================================================================
+# REACTIONNER (S1_Reactionner)
+#===============================================================================
+# Description: The reactionner is responsible for:
+# - Executing notification actions
+# - Executing event handler actions
+# https://alignak.readthedocs.org/en/latest/08_configobjects/reactionner.html
+#===============================================================================
+define reactionner {
+    reactionner_name        reactionner-master
+    address                 127.0.0.1
+    port                    7769
+
+    ## Realm
+    #realm                   All
+
+    ## Modules
+    # Default: None
+    # Interesting modules that can be used:
+    modules example
+
+    ## Optional parameters:
+    timeout                 3   ; Ping timeout
+    data_timeout            120 ; Data send timeout
+    max_check_attempts      3   ; If ping fails N or more, then the node is dead
+    check_interval          60  ; Ping node every N seconds
+
+    # In a HA architecture this daemon can be a spare
+    spare                   0   ; 1 = is a spare, 0 = is not a spare
+
+    # Enable https or not
+    use_ssl	                0
+    # Enable certificate/hostname check to avoid man-in-the-middle attacks
+    hard_ssl_name_check     0
+
+    ## Advanced parameters:
+    manage_sub_realms       0   ; Does it take jobs from schedulers of sub-Realms?
+    min_workers             1   ; Starts with N processes (0 = 1 per CPU)
+    max_workers             15  ; No more than N processes (0 = 1 per CPU)
+    polling_interval        1   ; Get jobs from schedulers every second
+
+    # Reactionner tags are the tags that the reactionner will manage. Use None as tag name to manage
+    # untagged notification/event handlers
+    #reactionner_tags        None
+}
diff --git a/test/cfg/default/daemons/receiver-master.cfg b/test/cfg/default/daemons/receiver-master.cfg
new file mode 100644
index 0000000..7e1518a
--- /dev/null
+++ b/test/cfg/default/daemons/receiver-master.cfg
@@ -0,0 +1,41 @@
+#===============================================================================
+# RECEIVER
+#===============================================================================
+# The receiver manages passive information. It is just a "buffer" that loads
+# passive modules (like NSCA); the arbiter reads it to dispatch the data.
+#===============================================================================
+define receiver {
+    receiver_name           receiver-master
+    address                 127.0.0.1
+    port                    7773
+
+    ## Realm
+    #realm                   All
+
+    ## Modules
+    # Default: None
+    # Interesting modules that can be used:
+    # - nsca                = NSCA protocol server for collecting passive checks
+    modules example
+
+    ## Optional parameters
+    timeout                 3   ; Ping timeout
+    data_timeout            120 ; Data send timeout
+    max_check_attempts      3   ; If ping fails N or more, then the node is dead
+    check_interval          60  ; Ping node every N seconds
+
+    # In a HA architecture this daemon can be a spare
+    spare                   0   ; 1 = is a spare, 0 = is not a spare
+
+    # Enable https or not
+    use_ssl	                0
+    # Enable certificate/hostname check to avoid man-in-the-middle attacks
+    hard_ssl_name_check     0
+
+    ## Advanced Feature
+    direct_routing          1   ; If enabled, it will directly send commands to the
+                                ; schedulers if it knows about the hostname in the
+                                ; command.
+                                ; If not the arbiter will get the information from
+                                ; the receiver.
+}
diff --git a/test/cfg/default/daemons/scheduler-master.cfg b/test/cfg/default/daemons/scheduler-master.cfg
new file mode 100644
index 0000000..add152d
--- /dev/null
+++ b/test/cfg/default/daemons/scheduler-master.cfg
@@ -0,0 +1,54 @@
+#===============================================================================
+# SCHEDULER (S1_Scheduler)
+#===============================================================================
+# The scheduler is a "Host manager". It gets the hosts and their services,
+# schedules the checks and transmits them to the pollers.
+# Description: The scheduler is responsible for:
+# - Creating the dependency tree
+# - Scheduling checks
+# - Calculating states
+# - Requesting actions from a reactionner
+# - Buffering and forwarding results to its associated broker
+# https://alignak.readthedocs.org/en/latest/08_configobjects/scheduler.html
+#===============================================================================
+define scheduler {
+    scheduler_name          scheduler-master
+    address                 127.0.0.1
+    port                    7768
+
+    ## Realm
+    #realm                   All
+
+    ## Modules
+    # Default: None
+    # Interesting modules that can be used:
+    # - backend_scheduler   = store the live state in the Alignak backend (retention)
+    modules example
+
+    ## Optional parameters:
+    timeout                 3   ; Ping timeout
+    data_timeout            120 ; Data send timeout
+    max_check_attempts      3   ; If ping fails N or more, then the node is dead
+    check_interval          60  ; Ping node every N seconds
+
+    # In a HA architecture this daemon can be a spare
+    spare                   0   ; 1 = is a spare, 0 = is not a spare
+
+    # Enable https or not
+    use_ssl	                0
+    # Enable certificate/hostname check to avoid man-in-the-middle attacks
+    hard_ssl_name_check     0
+
+    ## Advanced Features:
+    # Skip initial broks creation. Boot fast, but some broker modules won't
+    # work with it! (like livestatus for example)
+    skip_initial_broks      0
+
+    # Some schedulers can manage more hosts than others
+    weight                  1
+
+    # In NATted environments, you declare each satellite ip[:port] as seen by
+    # *this* scheduler (if port not set, the port declared by satellite itself
+    # is used)
+    #satellitemap    poller-1=1.2.3.4:7771, reactionner-1=1.2.3.5:7769, ...
+}
diff --git a/test/cfg/default/hostgroups.cfg b/test/cfg/default/hostgroups.cfg
new file mode 100644
index 0000000..b1858d3
--- /dev/null
+++ b/test/cfg/default/hostgroups.cfg
@@ -0,0 +1,61 @@
+
+define hostgroup {
+    hostgroup_name          router
+    alias                   All Router Hosts
+}
+
+define hostgroup {
+    hostgroup_name          hostgroup_01
+    alias                   hostgroup_alias_01
+}
+
+define hostgroup {
+    hostgroup_name          hostgroup_02
+    alias                   hostgroup_alias_02
+}
+
+define hostgroup {
+    hostgroup_name          hostgroup_03
+    alias                   hostgroup_alias_03
+}
+
+define hostgroup {
+    hostgroup_name          hostgroup_04
+    alias                   hostgroup_alias_04
+}
+
+define hostgroup {
+    hostgroup_name          hostgroup_05
+    alias                   hostgroup_alias_05
+}
+
+define hostgroup {
+    hostgroup_name          up
+    alias                   All Up Hosts
+}
+
+define hostgroup {
+    hostgroup_name          down
+    alias                   All Down Hosts
+}
+
+define hostgroup {
+    hostgroup_name          pending
+    alias                   All Pending Hosts
+}
+
+define hostgroup {
+    hostgroup_name          random
+    alias                   All Random Hosts
+}
+
+define hostgroup {
+    hostgroup_name          flap
+    alias                   All Flapping Hosts
+}
+
+define hostgroup {
+    hostgroup_name          allhosts
+    alias                   All Hosts
+    members                 test_router_0,test_host_0
+}
diff --git a/test/cfg/default/hosts.cfg b/test/cfg/default/hosts.cfg
new file mode 100644
index 0000000..1926050
--- /dev/null
+++ b/test/cfg/default/hosts.cfg
@@ -0,0 +1,53 @@
+define host{
+  check_interval                 1
+  check_period                   24x7
+  contact_groups                 test_contact
+  event_handler_enabled          1
+  failure_prediction_enabled     1
+  flap_detection_enabled         1
+  max_check_attempts             3
+  name                           generic-host
+  notification_interval          1
+  notification_options           d,u,r,f,s
+  notification_period            24x7
+  notifications_enabled          1
+  process_perf_data              1
+  register                       0
+  retain_nonstatus_information   1
+  retain_status_information      1
+  retry_interval                 1
+  notes_url                      /alignak/wiki/doku.php/$HOSTNAME$
+  action_url                     /alignak/pnp/index.php?host=$HOSTNAME$
+}
+
+define host{
+  action_url                     http://search.cpan.org/dist/Monitoring-Generator-TestConfig/
+  address                        127.0.0.1
+  alias                          flap_0
+  check_command                  check-host-alive!flap
+  check_period                   24x7
+  host_name                      test_router_0
+  hostgroups                     router
+  icon_image                     ../../docs/images/switch.png?host=$HOSTNAME$
+  icon_image_alt                 icon alt string
+  notes                          just a notes string
+  notes_url                      http://search.cpan.org/dist/Monitoring-Generator-TestConfig/README
+  use                            generic-host
+}
+
+define host{
+  address                        127.0.0.1
+  alias                          up_0
+  check_command                  check-host-alive-parent!up!$HOSTSTATE:test_router_0$
+  event_handler                  eventhandler
+  check_period                   24x7
+  host_name                      test_host_0
+  hostgroups                     hostgroup_01,up
+  parents                        test_router_0
+  use                            generic-host
+  criticity			 5
+  _ostype			 gnulinux
+  _oslicense			 gpl
+  ; address6 is not implemented in Alignak
+  ; address6			 ::1
+}
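Note: test_host_0 above declares custom variables (_ostype, _oslicense) that end up on the monitored objects a module can inspect. A minimal sketch of reading them, assuming the usual Alignak/Shinken convention of a 'customs' dict with upper-cased keys on the host object (an assumption, not verified against this test configuration):

# Illustrative sketch only: custom variables are conventionally exposed
# on Alignak objects as a 'customs' dict with upper-cased keys.
def get_host_os_info(host):
    """Return the (_ostype, _oslicense) customs declared on a host,
    e.g. ('gnulinux', 'gpl') for test_host_0 above."""
    return (host.customs.get('_OSTYPE'), host.customs.get('_OSLICENSE'))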
diff --git a/test/cfg/default/realm.cfg b/test/cfg/default/realm.cfg
new file mode 100644
index 0000000..6d83ca7
--- /dev/null
+++ b/test/cfg/default/realm.cfg
@@ -0,0 +1,6 @@
+# Very advanced feature for multisite management.
+# Read the docs VERY CAREFULLY before changing these settings :)
+define realm {
+    realm_name  All
+    default     1
+}
diff --git a/test/cfg/default/servicegroups.cfg b/test/cfg/default/servicegroups.cfg
new file mode 100644
index 0000000..8357e3a
--- /dev/null
+++ b/test/cfg/default/servicegroups.cfg
@@ -0,0 +1,61 @@
+
+define servicegroup {
+    servicegroup_name       servicegroup_01
+    alias                   servicegroup_alias_01
+}
+
+define servicegroup {
+    servicegroup_name       servicegroup_02
+    alias                   servicegroup_alias_02
+    members                 test_host_0,test_ok_0
+}
+
+define servicegroup {
+    servicegroup_name       servicegroup_03
+    alias                   servicegroup_alias_03
+}
+
+define servicegroup {
+    servicegroup_name       servicegroup_04
+    alias                   servicegroup_alias_04
+}
+
+define servicegroup {
+    servicegroup_name       servicegroup_05
+    alias                   servicegroup_alias_05
+}
+
+define servicegroup {
+    servicegroup_name       ok
+    alias                   All Ok Services
+}
+
+define servicegroup {
+    servicegroup_name       warning
+    alias                   All Warning Services
+}
+
+define servicegroup {
+    servicegroup_name       unknown
+    alias                   All Unknown Services
+}
+
+define servicegroup {
+    servicegroup_name       critical
+    alias                   All Critical Services
+}
+
+define servicegroup {
+    servicegroup_name       pending
+    alias                   All Pending Services
+}
+
+define servicegroup {
+    servicegroup_name       random
+    alias                   All Random Services
+}
+
+define servicegroup {
+    servicegroup_name       flap
+    alias                   All Flapping Services
+}
diff --git a/test/cfg/default/services.cfg b/test/cfg/default/services.cfg
new file mode 100644
index 0000000..52ec9ec
--- /dev/null
+++ b/test/cfg/default/services.cfg
@@ -0,0 +1,43 @@
+define service{
+  active_checks_enabled          1
+  check_freshness                0
+  check_interval                 1
+  check_period                   24x7
+  contact_groups                 test_contact
+  event_handler_enabled          1
+  failure_prediction_enabled     1
+  flap_detection_enabled         0
+  is_volatile                    0
+  max_check_attempts             2
+  name                           generic-service
+  notification_interval          1
+  notification_options           w,u,c,r,f,s
+  notification_period            24x7
+  notifications_enabled          1
+  obsess_over_service            1
+  parallelize_check              1
+  passive_checks_enabled         1
+  process_perf_data              1
+  register                       0
+  retain_nonstatus_information   1
+  retain_status_information      1
+  retry_interval                 1
+}
+
+define service{
+  active_checks_enabled          1
+  check_command                  check_service!ok
+  check_interval                 1
+  host_name                      test_host_0
+  icon_image                     ../../docs/images/tip.gif?host=$HOSTNAME$&srv=$SERVICEDESC$
+  icon_image_alt                 icon alt string
+  notes                          just a notes string
+  retry_interval                 1
+  service_description            test_ok_0
+  servicegroups                  servicegroup_01,ok
+  use                            generic-service
+  event_handler                  eventhandler
+  notes_url                      /alignak/wiki/doku.php/$HOSTNAME$/$SERVICEDESC$
+  action_url                     /alignak/pnp/index.php?host=$HOSTNAME$&srv=$SERVICEDESC$
+  _custname			 custvalue
+}
diff --git a/test/cfg/default/timeperiods.cfg b/test/cfg/default/timeperiods.cfg
new file mode 100644
index 0000000..48da73c
--- /dev/null
+++ b/test/cfg/default/timeperiods.cfg
@@ -0,0 +1,16 @@
+define timeperiod{
+    timeperiod_name 24x7
+    alias           24 Hours A Day, 7 Days A Week
+    sunday          00:00-24:00
+    monday          00:00-24:00
+    tuesday         00:00-24:00
+    wednesday       00:00-24:00
+    thursday        00:00-24:00
+    friday          00:00-24:00
+    saturday        00:00-24:00
+}
+
+define timeperiod{
+	timeperiod_name	none
+	alias		No Time Is A Good Time
+}
\ No newline at end of file
diff --git a/test/cfg/mod-example.cfg b/test/cfg/mod-example.cfg
new file mode 100644
index 0000000..8c91279
--- /dev/null
+++ b/test/cfg/mod-example.cfg
@@ -0,0 +1,17 @@
+## Module:      EXAMPLE
+## Loaded by:   All daemons (arbiter, broker, scheduler, poller, receiver, reactionner)
+# .....
+define module {
+    # Module alias to use as a module identifier
+    module_alias            example
+    # List of module types (see readme.rst)
+    module_types            example
+    # Python module name
+    python_name             alignak_module_example
+
+    # Module configuration parameters
+    # ---
+    option_1                foo
+    option_2                bar
+    option_3                foobar
+}
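Note: the definition above only declares the module and its parameters; the Python module reads them when it is instantiated. A minimal sketch of that pattern, assuming the conventional get_instance() factory and getattr() lookups on the Module object (class name, attribute names and defaults here are illustrative, not the actual alignak_module_example code):

# Hypothetical sketch; the real implementation lives in alignak_module_example.
from alignak.basemodule import BaseModule

class ExampleModule(BaseModule):
    def __init__(self, mod_conf):
        BaseModule.__init__(self, mod_conf)
        # Each 'option_*' line of the module definition becomes an attribute
        # of the Module object; fall back to None when a parameter is absent.
        self.option_1 = getattr(mod_conf, 'option_1', None)
        self.option_2 = getattr(mod_conf, 'option_2', None)
        self.option_3 = getattr(mod_conf, 'option_3', None)

def get_instance(mod_conf):
    # Factory called by the modules manager for the 'example' alias
    return ExampleModule(mod_conf)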
diff --git a/test/requirements.txt b/test/requirements.txt
new file mode 100644
index 0000000..f30ff55
--- /dev/null
+++ b/test/requirements.txt
@@ -0,0 +1,13 @@
+# Python requirements for unit tests
+-r ../requirements.txt
+# alignak
+-e git+https://github.com/Alignak-monitoring/alignak.git@develop#egg=alignak
+
+unittest2
+coveralls
+nose-cov
+coverage==0.4.0
+nose
+pylint
+pep8
+pep257
diff --git a/test/setup_test.sh b/test/setup_test.sh
new file mode 100755
index 0000000..c3195ee
--- /dev/null
+++ b/test/setup_test.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2015-2016: Alignak team, see AUTHORS.txt file for contributors
+#
+# This file is part of Alignak.
+#
+# Alignak is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Alignak is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Alignak.  If not, see <http://www.gnu.org/licenses/>.
+
+set -e
+
+THIS_PATH=$(dirname "$0")
+BASE_PATH=$(dirname "$THIS_PATH")
+
+cd "$BASE_PATH"
+
+echo 'Upgrade pip ...'
+pip install --upgrade pip
+
+echo 'Installing application requirements ...'
+pip install -r requirements.txt
+echo 'Installing application in development mode ...'
+pip install -e .
+echo 'Installing tests requirements ...'
+pip install --upgrade -r test/requirements.txt
+
+pyversion=$(python -c "import sys; print(''.join(map(str, sys.version_info[:2])))")
+if test -e "test/requirements.py${pyversion}.txt"
+then
+    pip install -r "test/requirements.py${pyversion}.txt"
+fi
+
diff --git a/test/test_module.py b/test/test_module.py
new file mode 100644
index 0000000..7db618b
--- /dev/null
+++ b/test/test_module.py
@@ -0,0 +1,330 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015-2016: Alignak team, see AUTHORS.txt file for contributors
+#
+# This file is part of Alignak.
+#
+# Alignak is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Alignak is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Alignak.  If not, see <http://www.gnu.org/licenses/>.
+#
+"""
+Test the module
+"""
+
+import re
+import time
+
+import requests
+
+from alignak_test import AlignakTest, time_hacker
+from alignak.modulesmanager import ModulesManager
+from alignak.objects.module import Module
+from alignak.basemodule import BaseModule
+
+import alignak_module_example
+
+
+class TestModules(AlignakTest):
+    """
+    This class contains the tests for the module
+    """
+
+    def test_module_loading(self):
+        """
+        Alignak module loading
+
+        :return:
+        """
+        self.print_header()
+        self.setup_with_file('./cfg/cfg_default.cfg')
+        self.assertTrue(self.conf_is_correct)
+        self.show_configuration_logs()
+
+        # The example module is declared in the arbiter configuration
+        modules = [m.module_alias for m in self.arbiter.myself.modules]
+        self.assertListEqual(modules, ['example'])
+
+        # The example module is the only module declared for the broker
+        modules = [m.module_alias for m in self.brokers['broker-master'].modules]
+        self.assertListEqual(modules, ['example'])
+
+        # The example module is declared for the poller
+        modules = [m.module_alias for m in self.pollers['poller-master'].modules]
+        self.assertListEqual(modules, ['example'])
+
+        # The example module is declared for the receiver
+        modules = [m.module_alias for m in self.receivers['receiver-master'].modules]
+        self.assertListEqual(modules, ['example'])
+
+        # The example module is declared for the reactionner
+        modules = [m.module_alias for m in self.reactionners['reactionner-master'].modules]
+        self.assertListEqual(modules, ['example'])
+
+        # The example module is declared for the scheduler
+        modules = [m.module_alias for m in self.schedulers['scheduler-master'].modules]
+        self.assertListEqual(modules, ['example'])
+
+    def test_module_manager(self):
+        """
+        Test if the module manager manages correctly all the modules
+        :return:
+        """
+        self.print_header()
+        self.setup_with_file('cfg/cfg_default.cfg')
+        self.assertTrue(self.conf_is_correct)
+
+        time_hacker.set_real_time()
+
+        # Create an Alignak module
+        mod = Module({
+            'module_alias': 'example',
+            'module_types': 'example',
+            'python_name': 'alignak_module_example',
+        })
+
+        # Create the modules manager for a daemon type
+        self.modulemanager = ModulesManager('receiver', None)
+
+        # Load and initialize the modules:
+        #  - load python module
+        #  - get module properties and instances
+        self.modulemanager.load_and_init([mod])
+
+        # Loading module logs
+        self.assert_any_log_match(re.escape(
+            "Importing Python module 'alignak_module_example' for example..."
+        ))
+        self.assert_any_log_match(re.escape(
+            "Module properties: {'daemons': "
+            "['arbiter', 'broker', 'scheduler', 'poller', 'receiver', 'reactionner'], "
+            "'phases': ['configuration', 'late_configuration', 'running', 'retention'], "
+            "'type': 'example', 'external': True}"
+        ))
+        self.assert_any_log_match(re.escape(
+            "Imported 'alignak_module_example' for example"
+        ))
+        self.assert_any_log_match(re.escape(
+            "Loaded Python module 'alignak_module_example' (example)"
+        ))
+        self.assert_any_log_match(re.escape(
+            "Give an instance of alignak_module_example for alias: example"
+        ))
+
+        my_module = self.modulemanager.instances[0]
+
+        # Get the list of internal (not external) module instances
+        self.assertListEqual([], self.modulemanager.get_internal_instances())
+        for phase in ['configuration', 'late_configuration', 'running', 'retention']:
+            self.assertListEqual([], self.modulemanager.get_internal_instances(phase))
+
+        # Get list of external modules
+        self.assertListEqual([my_module], self.modulemanager.get_external_instances())
+        for phase in ['configuration', 'late_configuration', 'retention']:
+            self.assertListEqual([my_module], self.modulemanager.get_external_instances(phase))
+        for phase in ['running']:
+            self.assertListEqual([my_module], self.modulemanager.get_external_instances(phase))
+
+        # Clear logs
+        self.clear_logs()
+
+        # Start external modules
+        self.modulemanager.start_external_instances()
+
+        # Starting external module logs
+        self.assert_log_match("Trying to initialize module: example", 0)
+        self.assert_log_match(re.escape("Test - Example in init"), 1)
+        self.assert_log_match("Initialization of the example module", 2)
+        self.assert_log_match("Starting external module example", 3)
+        self.assert_log_match("Starting external process for module example", 4)
+        self.assert_log_match("example is now started", 5)
+
+        # Check alive
+        self.assertIsNotNone(my_module.process)
+        self.assertTrue(my_module.process.is_alive())
+
+        # Clear logs
+        self.clear_logs()
+
+        # Kill the external module (normal stop is .stop_process)
+        my_module.kill()
+        time.sleep(0.1)
+        self.assert_log_match("Killing external module", 0)
+        self.assert_log_match("External module killed", 1)
+
+        # It should be dead (it was not stopped normally...) but we still have a process reference for this module!
+        self.assertIsNotNone(my_module.process)
+
+        # The module manager should detect the dead module ...
+        self.modulemanager.check_alive_instances()
+        self.assert_log_match("The external module example died unexpectedly!", 2)
+        self.assert_log_match("Setting the module example to restart", 3)
+
+        # Try to restart the dead modules
+        self.modulemanager.try_to_restart_deads()
+        self.assert_log_match("Trying to initialize module: example", 4)
+
+        # In fact it's too early, so it won't do it
+        # The module instance is still dead
+        self.assertFalse(my_module.process.is_alive())
+
+        # So we cheat on the last restart try time ...
+        my_module.last_init_try = -5
+        self.modulemanager.check_alive_instances()
+        self.modulemanager.try_to_restart_deads()
+        self.assert_log_match("Trying to initialize module: example", 5)
+        self.assert_log_match(re.escape("Test - Example in init"), 6)
+        self.assert_log_match("Initialization of the example module", 7)
+
+        # The module instance is now alive again
+        self.assertTrue(my_module.process.is_alive())
+        self.assert_log_match("I'm stopping module 'example'", 8)
+        self.assert_log_match("Starting external process for module example", 9)
+        self.assert_log_match("example is now started", 10)
+
+        # There is nothing else to restart in the module manager
+        self.assertEqual([], self.modulemanager.to_restart)
+
+        # Clear logs
+        self.clear_logs()
+
+        # Now we check the timed restart, so we kill the module again
+        my_module.kill()
+        time.sleep(0.2)
+        self.assertFalse(my_module.process.is_alive())
+        self.assert_log_match("Killing external module", 0)
+        self.assert_log_match("External module killed", 1)
+
+        # Should be too early
+        self.modulemanager.check_alive_instances()
+        self.assert_log_match("The external module example died unexpectedly!", 2)
+        self.assert_log_match("Setting the module example to restart", 3)
+
+        self.modulemanager.try_to_restart_deads()
+        self.assert_log_match("Trying to initialize module: example", 4)
+
+        # In fact it's too early, so it won't do it
+        # The module instance is still dead
+        self.assertFalse(my_module.process.is_alive())
+
+        # So we cheat on the last restart try time ...
+        my_module.last_init_try = -5
+        self.modulemanager.check_alive_instances()
+        self.modulemanager.try_to_restart_deads()
+        self.assert_log_match("Trying to initialize module: example", 5)
+        self.assert_log_match(re.escape("Test - Example in init"), 6)
+        self.assert_log_match("Initialization of the example module", 7)
+
+        # The module instance is now alive again
+        self.assertTrue(my_module.process.is_alive())
+        self.assert_log_match("I'm stopping module 'example'", 8)
+        self.assert_log_match("Starting external process for module example", 9)
+        self.assert_log_match("example is now started", 10)
+
+        # And we clear all now
+        self.modulemanager.stop_all()
+        # Stopping module logs
+
+        self.assert_log_match("Request external process to stop for example", 11)
+        self.assert_log_match(re.escape("I'm stopping module 'example' (pid="), 12)
+        self.assert_log_match(
+            re.escape("'example' is still alive after normal kill, I help it to die"), 13
+        )
+        self.assert_log_match("Killing external module ", 14)
+        self.assert_log_match("External module killed", 15)
+        self.assert_log_match("External process stopped.", 16)
+
+    def test_module_start_default(self):
+        """
+        Test the module initialization function with no parameters, using the defaults
+        :return:
+        """
+        self.print_header()
+        # We must call setup_with_file to get a self.logger...
+        self.setup_with_file('cfg/cfg_default.cfg')
+        self.assertTrue(self.conf_is_correct)
+
+        # -----
+        # Default initialization
+        # -----
+        # Clear logs
+        self.clear_logs()
+
+        # Create an Alignak module
+        mod = Module({
+            'module_alias': 'example',
+            'module_types': 'example',
+            'python_name': 'alignak_module_example'
+        })
+
+        instance = alignak_module_example.get_instance(mod)
+        self.assertIsInstance(instance, BaseModule)
+
+        self.assert_log_match(
+            re.escape("Give an instance of alignak_module_example for "
+                      "alias: example"), 0)
+
+    def test_module_start_parameters(self):
+        """
+        Test the module initialization function, providing parameters
+        :return:
+        """
+        self.print_header()
+        # We must call setup_with_file to get a self.logger...
+        self.setup_with_file('cfg/cfg_default.cfg')
+        self.assertTrue(self.conf_is_correct)
+
+        # -----
+        # Provide parameters
+        # -----
+        # Clear logs
+        self.clear_logs()
+
+        # Create an Alignak module
+        mod = Module({
+            'module_alias': 'example',
+            'module_types': 'example',
+            'python_name': 'alignak_module_example',
+            'option1': 'foo',
+            'option2': 'bar',
+            'option3': 'foobar'
+        })
+
+        instance = alignak_module_example.get_instance(mod)
+        self.assertIsInstance(instance, BaseModule)
+
+        self.assert_log_match(
+            re.escape("Give an instance of alignak_module_example for "
+                      "alias: example"), 0)
+        self.assert_log_match(
+            re.escape("configuration, foo, bar, foobar"), 1)
+
+    def test_module_zzz_run(self):
+        """
+        Test the module while running
+        :return:
+        """
+        self.print_header()
+        # We must call setup_with_file to get a self.logger...
+        self.setup_with_file('cfg/cfg_default.cfg')
+        self.assertTrue(self.conf_is_correct)
+
+        # -----
+        # Run the module
+        # -----
+        # Clear logs
+        self.clear_logs()
+
+        """
+        To be done !
+        """
diff --git a/version.py b/version.py
index 5e085d4..cf08f54 100755
--- a/version.py
+++ b/version.py
@@ -9,13 +9,13 @@
 # Module type for PyPI keywords
 # Used for:
 # - PyPI keywords
-__module_type__ = u"broker"
+__module_types__ = u"example"
 
 # Application manifest
-__version__ = u"0.1.0"
+__version__ = u"0.2.0"
 __author__ = u"Alignak team"
 __author_email__ = u"frederic.mohier@gmail.com"
 __copyright__ = u"(c) 2015-2016 - %s" % __author__
 __license__ = u"GNU Affero General Public License, version 3"
-__url__ = u"https://github.com/Alignak-monitoring-contrib/alignak-module-EXAMPLE"
-__description__ = u"Alignak - Broker module for ...."
+__url__ = u"https://github.com/Alignak-monitoring/alignak-module-example"
+__description__ = u"Alignak - Alignak module as an example to build other modules"
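Note: these manifest variables are typically consumed by setup.py; a hedged sketch of the common loading pattern (the mechanism is an assumption, not necessarily this repository's setup.py):

# Sketch: load the manifest declared in version.py without importing the package.
manifest = {}
with open('version.py') as fh:
    exec(fh.read(), manifest)

assert manifest['__version__'] == u"0.2.0"
assert manifest['__module_types__'] == u"example"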